diff --git a/cspell.json b/cspell.json index 22bb21ec9f1b..861b71c85a62 100644 --- a/cspell.json +++ b/cspell.json @@ -40,6 +40,7 @@ "cheatcodes", "checksummed", "cimg", + "ciphertext", "clonedeep", "clonedeepwith", "cmd", @@ -149,6 +150,7 @@ "persistable", "pids", "pkgs", + "plaintext", "Plookup", "pnat", "Pokeable", diff --git a/yellow-paper/docs/addresses-and-keys/diversified-and-stealth.md b/yellow-paper/docs/addresses-and-keys/diversified-and-stealth.md index b39fe8e17274..7e2e849243b3 100644 --- a/yellow-paper/docs/addresses-and-keys/diversified-and-stealth.md +++ b/yellow-paper/docs/addresses-and-keys/diversified-and-stealth.md @@ -1,9 +1,8 @@ --- title: Diversified and Stealth Accounts -sidebar_position: 4 --- -The [keys specification](./specification.md) describes derivation mechanisms for diversified and stealth public keys. However, the protocol requires users to interact with addresses. +The [keys specification](./specification.md) describes derivation mechanisms for diversified and stealth public keys. However, the protocol requires users to interact with addresses. ## Computing Addresses @@ -21,10 +20,10 @@ contract DiversifiedAccount private fn entrypoint(payload: action[]) assert msg_sender == get_owner_address() execute(payload) - + private fn is_valid(message_hash: Field) return get_owner_address().is_valid(message_hash) - + internal private get_owner_address() let address_preimage = pxe.get_address_preimage(this) assert hash(address_preimage) == this @@ -39,4 +38,4 @@ Given the contract does not require initialization since it has no constructor, ## Discarded Approaches -An alternative approach was to introduce a new type of call, a diversified call, that would allow the caller to impersonate any address they can derive from their own, for an enshrined derivation mechanism. Account contracts could use this opcode, as opposed to a regular call, to issue calls on behalf on their diversified and stealth addresses. 
However, this approach failed to account for calls made back to the account contracts, in particular authwit checks. It also required protocol changes, introducing a new type of call which could be difficult to reason about, and increased attack surface. The only benefit over the approach chosen is that it would require one less extra function call to hop from the user's main account contract to the diversified or stealth one. \ No newline at end of file +An alternative approach was to introduce a new type of call, a diversified call, that would allow the caller to impersonate any address they can derive from their own, for an enshrined derivation mechanism. Account contracts could use this opcode, as opposed to a regular call, to issue calls on behalf of their diversified and stealth addresses. However, this approach failed to account for calls made back to the account contracts, in particular authwit checks. It also required protocol changes, introducing a new type of call which could be difficult to reason about, and increased attack surface. The only benefit over the approach chosen is that it would require one less extra function call to hop from the user's main account contract to the diversified or stealth one. diff --git a/yellow-paper/docs/addresses-and-keys/index.md b/yellow-paper/docs/addresses-and-keys/index.md index ce710146f1b8..89888dacf461 100644 --- a/yellow-paper/docs/addresses-and-keys/index.md +++ b/yellow-paper/docs/addresses-and-keys/index.md @@ -1,15 +1,14 @@ --- title: Addresses and Keys -sidebar_position: 2 --- Aztec has no concept of externally-owned accounts. Every address is meant to identify a smart contract in the network. Addresses are then a commitment to a contract class, a list of constructor arguments, and a set of keys. -Keys in Aztec are used both for authorization and privacy. Authorization keys are managed by account contracts, and not mandated by the protocol. 
Each account contract may use different authorization keys, if at all, with different signing mechanisms. +Keys in Aztec are used both for authorization and privacy. Authorization keys are managed by account contracts, and not mandated by the protocol. Each account contract may use different authorization keys, if at all, with different signing mechanisms. Privacy keys are used for note encryption, tagging, and nullifying. These are also not enforced by the protocol. However, for facilitating composability, the protocol enshrines a set of well-known encryption and tagging mechanisms, that can be leveraged by applications as they interact with accounts. -The [specification](./specification.md) covers the main requirements for addresses and keys, along with their specification and derivation mechanisms, while the [precompiles](./precompiles.md) section describes well-known contract addresses, with implementations defined by the protocol, used for note encryption and tagging. +The [specification](./specification.md) covers the main requirements for addresses and keys, along with their specification and derivation mechanisms, while the [precompiles](./precompiles.md) section describes well-known contract addresses, with implementations defined by the protocol, used for note encryption and tagging. Last, the [diversified and stealth accounts](./diversified-and-stealth.md) sections describe application-level recommendations for diversified and stealth accounts. diff --git a/yellow-paper/docs/addresses-and-keys/specification.md b/yellow-paper/docs/addresses-and-keys/specification.md index 1fb9d5c262ad..076d1a5b51d6 100644 --- a/yellow-paper/docs/addresses-and-keys/specification.md +++ b/yellow-paper/docs/addresses-and-keys/specification.md @@ -1,6 +1,5 @@ --- title: Specification -sidebar_position: 1 description: Specification of address format in the protocol, default privacy keys format and derivation, and nullifier derivation. 
--- @@ -94,10 +93,6 @@ $$ ## Requirements for Keys -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - ### Scenario A common illustration in this document is Bob sending funds to Alice, by: @@ -594,6 +589,7 @@ Bob wants to send himself a private message (e.g. a record of the outgoing notes > Note: rather than copying the 'shared secret' approach of Bob sending to Alice, we can cut a corner (because Bob is the sender and recipient, and so knows his own secrets). > Note: if Bob has sent a private message to Alice, and he also wants to send himself a corresponding message: +> > - he can likely re-use the ephemeral keypairs for himself. > - he can include $\esk$ in the plaintext that he sends to himself, as a way of reducing the size of his $\ciphertext$ (since the $\esk$ will enable him to access all the information in the ciphertext that was sent to Alice). diff --git a/yellow-paper/docs/bytecode/index.md b/yellow-paper/docs/bytecode/index.md index 10845f6e6aad..cb4266f4f4bc 100644 --- a/yellow-paper/docs/bytecode/index.md +++ b/yellow-paper/docs/bytecode/index.md @@ -2,10 +2,6 @@ title: Bytecode --- -:::info Disclaimer -This is a draft. The public VM and brillig are under heavy development, and specific details about how they are compiled and their bytecode might change in the future. -::: - This section describes how contracts are represented within the protocol for execution. In the context of Aztec, a contract is a set of functions which can be of one of three types: diff --git a/yellow-paper/docs/calls/batched-calls.md b/yellow-paper/docs/calls/batched-calls.md index f3619e020997..001f6819cd5b 100644 --- a/yellow-paper/docs/calls/batched-calls.md +++ b/yellow-paper/docs/calls/batched-calls.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 3 ---- - # Batched calls Calls to private functions can be _batched_ instead of executed [synchronously](./sync-calls.md). 
When executing a batched call to a private function, the function is not executed on the spot, but enqueued for execution at the end of local execution. Once the private call stack has been emptied, all batched execution requests are grouped by target (contract and function selector), and executed via a single call to each target. @@ -13,7 +9,7 @@ Batched calls are processed by the private kernel circuit. On each kernel circui The arguments for the batched call are arranged in an array with one position for each individual call. Each position within the array is a nested array where the first element is the call context for that individual call, followed by the actual arguments of the call. A batched call is expected to return an array of `PrivateCircuitPublicInputs`, where each public input's call context matches the call context from the corresponding individual call. This allows batched delegate calls, where each individual call processed has a context of its own. This can be used to emit logs on behalf of multiple contracts within a single batched call. - + In pseudocode, the kernel circuit executes the following logic: ``` @@ -28,4 +24,4 @@ loop: break ``` -The rationale for batched calls is to minimize the number of function calls in private execution, in order to reduce total proving times. Batched calls are mostly intended for usage with note delivery precompiles, since these do not require synchronous execution, and allows for processing all notes to be encrypted and tagged with the same mechanism using the same call. Batched calls can also be used for other common functions that do not require to be executed synchronously and are likely to be invoked multiple times. \ No newline at end of file +The rationale for batched calls is to minimize the number of function calls in private execution, in order to reduce total proving times. 
Batched calls are mostly intended for usage with note delivery precompiles, since these do not require synchronous execution, and allow for processing all notes to be encrypted and tagged with the same mechanism using the same call. Batched calls can also be used for other common functions that do not require to be executed synchronously and are likely to be invoked multiple times. diff --git a/yellow-paper/docs/calls/delegate-calls.md b/yellow-paper/docs/calls/delegate-calls.md index 103cc5ab48aa..ccd0b7011a27 100644 --- a/yellow-paper/docs/calls/delegate-calls.md +++ b/yellow-paper/docs/calls/delegate-calls.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 6 ---- - # Delegate calls Delegate calls are function calls against a contract class identifier instead of an instance. Any call, synchronous or asynchronous, can be made as a delegate call. The behavior of a delegate call is to execute the function code in the specified class identifier but on the context of the current instance. This opens the door to script-like executions and upgradeable contracts. Delegate calls are based on [EIP7](https://eips.ethereum.org/EIPS/eip-7). diff --git a/yellow-paper/docs/calls/enqueued-calls.md b/yellow-paper/docs/calls/enqueued-calls.md index 069a0c0c3020..a654e021dfd2 100644 --- a/yellow-paper/docs/calls/enqueued-calls.md +++ b/yellow-paper/docs/calls/enqueued-calls.md @@ -1,10 +1,7 @@ ---- -sidebar_position: 2 ---- # Enqueued calls Calls from private functions to public functions are asynchronous. Since private and public functions are executed in different domains at different times and in different contexts, as the former are run by the user on a PXE and the latter by the sequencer, it is not possible for a private function to call a public one and await its result. Instead, private functions can _enqueue_ public function calls. 
The process is analogous to [synchronous calls](./sync-calls.md), but rely on an `enqueuePublicFunctionCall` oracle call that accepts the same arguments. The returned object by the enqueue call is a `PublicCallStackItem` with a flag `is_execution_request` set and empty side effects, to reflect that the stack item has not been executed yet. As with synchronous calls, the caller is responsible for validating the function and arguments in the call stack item, and to push its hash to its public call stack, which represents the list of enqueued public function calls. -As the transaction is received by the sequencer, the public kernel circuit begins processing the enqueued public function calls from the transaction public call stack, pushing new recursive calls as needed, until the public call stack is empty, as described in the [synchronous calls](./sync-calls.md) section. \ No newline at end of file +As the transaction is received by the sequencer, the public kernel circuit begins processing the enqueued public function calls from the transaction public call stack, pushing new recursive calls as needed, until the public call stack is empty, as described in the [synchronous calls](./sync-calls.md) section. diff --git a/yellow-paper/docs/calls/public_private_messaging.md b/yellow-paper/docs/calls/public-private-messaging.md similarity index 92% rename from yellow-paper/docs/calls/public_private_messaging.md rename to yellow-paper/docs/calls/public-private-messaging.md index 9bd9dcca2794..ddcff5a7b0e8 100644 --- a/yellow-paper/docs/calls/public_private_messaging.md +++ b/yellow-paper/docs/calls/public-private-messaging.md @@ -1,29 +1,23 @@ ---- -sidebar_position: 10 ---- - # Inter-Layer Calls ## Public-Private messaging -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - -Public state and private state exist in different trees. 
In a private function you cannot reference or modify public state. +Public state and private state exist in different trees. In a private function you cannot reference or modify public state. Yet, it should be possible for: + 1. private functions to call private or public functions 2. public functions to call private or public functions -Private functions are executed locally by the user and work by providing evidence of correct execution generated locally through kernel proofs. This way, the sequencer doesn't need to have knowledge of everything happening in the transaction, only the results. Public functions, on the other hand, are able to utilize the latest state to manage updates and perform alterations, as they are executed by the sequencer. +Private functions are executed locally by the user and work by providing evidence of correct execution generated locally through kernel proofs. This way, the sequencer doesn't need to have knowledge of everything happening in the transaction, only the results. Public functions, on the other hand, are able to utilize the latest state to manage updates and perform alterations, as they are executed by the sequencer. -Therefore, private functions are always executed first, as they are executed on a state $S_i$, where $i \le n$, with $S_n$ representing the current state where the public functions always operate on the current state $S_n$. +Therefore, private functions are always executed first, as they are executed on a state $S_i$, where $i \le n$, with $S_n$ representing the current state where the public functions always operate on the current state $S_n$. -This enables private functions to enqueue calls to public functions. But vice-versa is not true. Since private functions execute first, it cannot "wait" on the results of any of their calls to public functions. Stated differently, any calls made across domains are unilateral in nature. +This enables private functions to enqueue calls to public functions. 
But vice-versa is not true. Since private functions execute first, it cannot "wait" on the results of any of their calls to public functions. Stated differently, any calls made across domains are unilateral in nature. The figure below shows the order of function calls on the left-hand side, while the right-hand side shows how the functions will be executed. Notably, the second private function call is independent of the output of the public function and merely occurs after its execution. Tx call order be: + ```mermaid graph TD A[Private Function 1] -->|Calls| B(Public Function 1) @@ -39,17 +33,20 @@ graph TD ``` ## Private to Public Messaging + When a private function calls a public function: + 1. Public function args get hashed together 1. A public call stack item is created with the public function selector, it's contract address and args hash 1. The hash of the item gets enqueued into a separate public call stack and passed as inputs to the private kernel -1. The private kernel pushes these hashes into the public input, which the sequencer can see. +1. The private kernel pushes these hashes into the public input, which the sequencer can see. 1. PXE creates a transaction object as outlined [here](../transactions/tx-object.md) where it passes the hashes and the actual call stack item 1. PXE sends the transaction to the sequencer. -1. Sequencer then picks up the public call stack item and executes each of the functions. +1. Sequencer then picks up the public call stack item and executes each of the functions. 1. The Public VM which executes the methods then verifies that the hash provided by the private kernel matches the current call stack item. ### Handling Privacy Leakage and `msg.sender` + In the above design, the sequencer only sees the public part of the call stack along with any new commitments, nullifiers etc that were created in the private transaction i.e. should learns nothing more of the private transaction (such as its origin, execution logic etc). 
:::warning @@ -58,16 +55,17 @@ TODO: Haven't finalized what msg.sender will be Within the context of these enqueued public functions, any usage of `msg_sender` should return **TODO**. If the `msg_sender` is the actual user, then it leaks privacy. If `msg_sender` is the contract address, this leaks which contract is calling the public method and therefore leaks which contract the user was interacting with in private land. -Therefore, when the call stack is passed to the kernel circuit, the kernel should assert the `msg_sender` is 0 and hash appropriately. +Therefore, when the call stack is passed to the kernel circuit, the kernel should assert the `msg_sender` is 0 and hash appropriately. ### Reverts If the private part of the transaction reverts, then public calls are never enqueued. But if the public part of the transaction reverts, it should still revert the entire transaction i.e. the sequencer should drop the execution results of the private part of the transaction and not include those in the state transitioner smart contract. However, since the sequencer had to execute your transaction, appropriate fee will be charged. Reverting in public causing the whole transaction to be dropped enables existing paradigms of ethereum where your valid transaction can revert because of altered state e.g., trade incurring too much slippage. ## Public to Private Messaging + Since public functions execute after private functions, it isn't possible for public to call a private function in the same transaction. Nevertheless, it is quite useful for public functions to have a message passing system to private. A public function could add messages to an append only merkle tree to save messages from a public function call, that can later be executed by a private function. Note, only a transaction coming after the one including the message from a public function can consume it. In practice this means that unless you are the sequencer it will not be within the same rollup. 
-To elaborate, a public function may not have read access to encrypted private state in the note hash tree, but it can write to it. You could create a note in the public domain, compute it's note hash which gets passed to the inputs of the public VM which adds the hash to the note hash tree. The user who wants to redeem the note can add the note preimage to their PXE and then redeem/nullify the note in the private domain at a later time. +To elaborate, a public function may not have read access to encrypted private state in the note hash tree, but it can write to it. You could create a note in the public domain, compute its note hash which gets passed to the inputs of the public VM which adds the hash to the note hash tree. The user who wants to redeem the note can add the note preimage to their PXE and then redeem/nullify the note in the private domain at a later time. In the picture below, it is worth noting that all data reads performed by private functions are historical in nature, and that private functions are not capable of modifying public storage. Conversely, public functions have the capacity to manipulate private storage (e.g., inserting new commitments, potentially as part of transferring funds from the public domain to the private domain). diff --git a/yellow-paper/docs/calls/static-calls.md b/yellow-paper/docs/calls/static-calls.md index b4a357fee62a..2462d95639a1 100644 --- a/yellow-paper/docs/calls/static-calls.md +++ b/yellow-paper/docs/calls/static-calls.md @@ -1,11 +1,9 @@ ---- -sidebar_position: 5 ---- # Static calls [Synchronous calls](./sync-calls.md), both private and public, can be executed as _static_ calls. This means that the called function, and all nested calls within, cannot emit any modifying side effects, such as creating or consuming notes, writing to storage, or emitting events. The purpose of a static call is to query another contract while ensuring that the call will not modify state. 
Static calls are based on [EIP214](https://eips.ethereum.org/EIPS/eip-214). In particular, the following fields of the returned `CallStackItem` must be zero or empty in a static call: + - `new_commitments` - `new_nullifiers` - `nullified_commitments` diff --git a/yellow-paper/docs/calls/sync-calls.md b/yellow-paper/docs/calls/sync-calls.md index 7c0ecbdfc064..8b3b8941c6d9 100644 --- a/yellow-paper/docs/calls/sync-calls.md +++ b/yellow-paper/docs/calls/sync-calls.md @@ -1,17 +1,14 @@ ---- -sidebar_position: 1 ---- # Synchronous calls -Calls from a private function to another private function, as well as calls from a public function to another public function, are *synchronous*. When a synchronous function call is found during execution, execution jumps to the target of the call, and returns to the caller with a return value from the function called. This allows easy composability across contracts. +Calls from a private function to another private function, as well as calls from a public function to another public function, are _synchronous_. When a synchronous function call is found during execution, execution jumps to the target of the call, and returns to the caller with a return value from the function called. This allows easy composability across contracts. At the protocol level, each call is represented as a `CallStackItem`, which includes the contract address and function being called, as well as the public inputs `PrivateCircuitPublicInputs` or `PublicCircuitPublicInputs` that are outputted by the execution of the called function. These public inputs include information on the call context, the side effects of the execution, and the block header. -At the contract level, a call is executed via an oracle call `callPrivateFunction` or `callPublicFunction`, both of which accept the contract address to call, the function selector, and a hash of the arguments. 
The oracle call prompts the executor to pause the current frame, jump to the target of the call, and return its result. The result is a `CallStackItem` that represents the nested execution. +At the contract level, a call is executed via an oracle call `callPrivateFunction` or `callPublicFunction`, both of which accept the contract address to call, the function selector, and a hash of the arguments. The oracle call prompts the executor to pause the current frame, jump to the target of the call, and return its result. The result is a `CallStackItem` that represents the nested execution. The caller is responsible for asserting that the function and arguments in the returned `CallStackItem` match the requested ones, otherwise a malicious oracle could return a `CallStackItem` for a different execution. The caller must also push the hash of the returned `CallStackItem` into the private or public call stack of the current execution context, which is returned as part of the `CircuitPublicInputs` output. The end result is a top-level entrypoint `CallStackItem`, with a stack of nested call stack items to process. -The kernel circuit is then responsible for iteratively processing each `CallStackItem`, pushing new items into the stack as it encounters nested calls, until the stack is empty. The private kernel circuit processes private function calls locally in the PXE, whereas the public kernel circuit processes public function calls on the sequencer. +The kernel circuit is then responsible for iteratively processing each `CallStackItem`, pushing new items into the stack as it encounters nested calls, until the stack is empty. The private kernel circuit processes private function calls locally in the PXE, whereas the public kernel circuit processes public function calls on the sequencer. The private kernel circuit iterations begin with the entrypoint execution, empty output and proof. 
The public kernel circuit starts with the public call stack in the transaction object, and builds on top of the output and proof of the private kernel circuit. @@ -32,4 +29,4 @@ while call_stack is not empty: kernel_public_inputs, proof = kernel_circuit(call_stack_item, kernel_public_inputs, proof) ``` -The kernel circuit asserts that nested functions and their side effects are processed in order, and that the hash of each nested execution matches the corresponding hash outputted in the call stack by each `CircuitPublicInputs`. \ No newline at end of file +The kernel circuit asserts that nested functions and their side effects are processed in order, and that the hash of each nested execution matches the corresponding hash outputted in the call stack by each `CircuitPublicInputs`. diff --git a/yellow-paper/docs/calls/unconstrained-calls.md b/yellow-paper/docs/calls/unconstrained-calls.md index 0a569e68f9a1..9a90061cc4a9 100644 --- a/yellow-paper/docs/calls/unconstrained-calls.md +++ b/yellow-paper/docs/calls/unconstrained-calls.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 6 ---- - # Unconstrained calls @@ -10,6 +6,6 @@ Private function calls can be executed as _unconstrained_. Unconstrained functio Unconstrained calls are executed via a `unconstrainedCallPrivateFunction` oracle call, which accepts the same arguments as a regular `callPrivateFunction`, and return the result from the function call. Unconstrained calls are not pushed into the `private_call_stack` and do not incur in an additional kernel iteration. -Rationale for unconstrained calls is to allows apps to consume results from functions that do not need to be provable. An example use case for unconstrained calls is unconstrained encryption and note tagging, which can be used when the sender is incentivized to ensure the recipient receives the data sent. +Rationale for unconstrained calls is to allow apps to consume results from functions that do not need to be provable. 
An example use case for unconstrained calls is unconstrained encryption and note tagging, which can be used when the sender is incentivized to ensure the recipient receives the data sent. -Another motivation for unconstrained calls is for retrieving or computing data where the end result can be more efficiently constrained by the caller. \ No newline at end of file +Another motivation for unconstrained calls is for retrieving or computing data where the end result can be more efficiently constrained by the caller. diff --git a/yellow-paper/docs/circuits/_category_.json b/yellow-paper/docs/circuits/_category_.json deleted file mode 100644 index 11e261e9b2a6..000000000000 --- a/yellow-paper/docs/circuits/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Circuits", - "position": 2, - "link": { - "type": "generated-index", - "description": "circuits..." - } -} diff --git a/yellow-paper/docs/circuits/private-function.md b/yellow-paper/docs/circuits/private-function.md index d55191d55902..0e61276f57d9 100644 --- a/yellow-paper/docs/circuits/private-function.md +++ b/yellow-paper/docs/circuits/private-function.md @@ -1,9 +1,5 @@ # Private Function Circuit -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - ## Requirements A private function circuit is a custom circuit tailored to the needs of a specific application. This circuit should be designed to handle private data processing while generating public inputs that safeguard the application and account's intentions without compromising sensitive information. 
@@ -20,6 +16,7 @@ The public inputs of a private function circuit will be incorporated into the pr It must adhere to the following format: + | Field | Type | Description | | ---------------------------------- | -------------------------- | ---------------------------------------------------------------------- | | _call_context_ | _CallContext_ | Context of the call corresponding to this function execution. | diff --git a/yellow-paper/docs/circuits/private-kernel-initial.md b/yellow-paper/docs/circuits/private-kernel-initial.md index 71eeec5cb33a..f3a7c9138975 100644 --- a/yellow-paper/docs/circuits/private-kernel-initial.md +++ b/yellow-paper/docs/circuits/private-kernel-initial.md @@ -1,9 +1,5 @@ # Private Kernel Circuit - Initial -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - ## Requirements In the **initial** kernel iteration, the process involves taking a transaction request and private call data, verifying their integrity, and preparing the necessary data for subsequent circuits to operate. This step is particularly beneficial due to its separation from the [inner private kernel circuit](./private-kernel-inner.md), as the first call lacks a "previous kernel" to process. Additionally, it executes tasks that are pertinent to a transaction and need only occur once. diff --git a/yellow-paper/docs/circuits/private-kernel-inner.md b/yellow-paper/docs/circuits/private-kernel-inner.md index 4b0f709d11c2..c53cfc370fe7 100644 --- a/yellow-paper/docs/circuits/private-kernel-inner.md +++ b/yellow-paper/docs/circuits/private-kernel-inner.md @@ -1,9 +1,5 @@ # Private Kernel Circuit - Inner -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. 
-::: - ## Requirements Each **inner** kernel iteration processes a private function call and the results of a previous kernel iteration. diff --git a/yellow-paper/docs/circuits/private-kernel-reset.md b/yellow-paper/docs/circuits/private-kernel-reset.md index 1f4547caa640..46cbc5151447 100644 --- a/yellow-paper/docs/circuits/private-kernel-reset.md +++ b/yellow-paper/docs/circuits/private-kernel-reset.md @@ -1,9 +1,5 @@ # Private Kernel Circuit - Reset -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - ## Requirements The **reset** circuit is designed to abstain from processing individual private function calls. Instead, it injects the outcomes of an initial or inner private kernel circuit, scrutinizes the public inputs, and resets the verified data within its scope. This circuit can be executed either preceding the tail circuit or as a means to "reset" public inputs, allowing data to accumulate seamlessly in subsequent iterations. diff --git a/yellow-paper/docs/circuits/private-kernel-tail.md b/yellow-paper/docs/circuits/private-kernel-tail.md index b36464a25f33..82371a63a49a 100644 --- a/yellow-paper/docs/circuits/private-kernel-tail.md +++ b/yellow-paper/docs/circuits/private-kernel-tail.md @@ -1,9 +1,5 @@ # Private Kernel Circuit - Tail -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - ## Requirements The **tail** circuit abstains from processing individual private function calls. Instead, it incorporates the outcomes of a private kernel circuit and conducts additional processing essential for generating the final public inputs suitable for submission to the transaction pool, subsequently undergoing processing by Sequencers and Provers. 
The final public inputs must safeguard against revealing any private information unnecessary for the execution of public kernel circuits and rollup circuits. diff --git a/yellow-paper/docs/circuits/public-kernel-iterative.md b/yellow-paper/docs/circuits/public-kernel-iterative.md index 4745ebec8409..67c55c253746 100644 --- a/yellow-paper/docs/circuits/public-kernel-iterative.md +++ b/yellow-paper/docs/circuits/public-kernel-iterative.md @@ -1,9 +1,5 @@ # Public Kernel Circuit - Iterative -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - ## Requirements In the public kernel iteration, the process involves taking a previous iteration and public call data, verifying their integrity, and preparing the necessary data for subsequent circuits to operate. diff --git a/yellow-paper/docs/circuits/public-kernel-tail.md b/yellow-paper/docs/circuits/public-kernel-tail.md index 38fd39a28c04..2c2892e15e12 100644 --- a/yellow-paper/docs/circuits/public-kernel-tail.md +++ b/yellow-paper/docs/circuits/public-kernel-tail.md @@ -1,9 +1,5 @@ # Public Kernel Circuit - Tail -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - ## Requirements The **tail** circuit refrains from processing individual public function calls. Instead, it integrates the results of iterative public kernel circuit and performs additional verification and processing necessary for generating the final public inputs. 
diff --git a/yellow-paper/docs/contract-deployment/classes.md b/yellow-paper/docs/contract-deployment/classes.md index 55695d3244a5..bb658c380dc4 100644 --- a/yellow-paper/docs/contract-deployment/classes.md +++ b/yellow-paper/docs/contract-deployment/classes.md @@ -8,15 +8,17 @@ Contract classes simplify the process of reusing code by enshrining implementati :::info Read the following discussions for additional context: + - [Abstracting contract deployment](https://forum.aztec.network/t/proposal-abstracting-contract-deployment/2576) - [Implementing contract upgrades](https://forum.aztec.network/t/implementing-contract-upgrades/2570) - [Contract classes, upgrades, and default accounts](https://forum.aztec.network/t/contract-classes-upgrades-and-default-accounts/433) -::: + ::: ## Structure The structure of a contract class is defined as: + | Field | Type | Description | |----------|----------|----------| | version | u8 | Version identifier. Initially one, bumped for any changes to the contract class struct. | @@ -32,6 +34,7 @@ The structure of a contract class is defined as: ### Private Function + | Field | Type | Description | |----------|----------|----------| | function_selector | u32 | Selector of the function. Calculated as the hash of the method name and arguments. | @@ -42,6 +45,7 @@ The structure of a contract class is defined as: ### Public and Unconstrained Function + | Field | Type | Description | |----------|----------|----------| | function_selector | u32 | Selector of the function. Calculated as the hash of the method name and arguments. 
| diff --git a/yellow-paper/docs/contract-deployment/instances.md b/yellow-paper/docs/contract-deployment/instances.md index 225436232085..6bf602068be0 100644 --- a/yellow-paper/docs/contract-deployment/instances.md +++ b/yellow-paper/docs/contract-deployment/instances.md @@ -13,6 +13,7 @@ A contract instance is a concrete deployment of a [contract class](./classes.md) The structure of a contract instance is defined as: + | Field | Type | Description | |----------|----------|----------| | version | u8 | Version identifier. Initially one, bumped for any changes to the contract instance struct. | @@ -32,6 +33,7 @@ The address of the contract instance is computed as the hash of all elements in ## Statuses A contract instance at a given address can be in any of the following statuses: + - **Undeployed**: The instance has not yet been deployed. A user who knows the preimage of the address can issue a private call into the contract, as long as it does not require initialization. Public function calls to this address will fail. - **Privately deployed**: The instance constructor has been executed, but its class identifier has not been broadcasted. A user who knows the preimage of the address can issue a private call into the contract. Public function calls to this address will fail. Private deployments are signalled by emitting an initialization nullifier when the constructor runs. - **Publicly deployed**: The instance constructor has been executed, and the address preimage has been broadcasted. All function calls to the address, private or public, are valid. Public deployments are signalled by emitting a public deployment nullifier. @@ -41,6 +43,7 @@ A contract instance at a given address can be in any of the following statuses: ## Constructors Contract constructors are not enshrined in the protocol, but handled at the application circuit level. 
A contract must satisfy the following requirements: + - The constructor must be invoked exactly once - The constructor must be invoked with the arguments in the address preimage - Functions that depend on contract initialization cannot be invoked until the constructor is run @@ -73,6 +76,7 @@ Additionally, the `Deployer` contract provides two `universal_deploy` functions, ## Verification of Executed Code The kernel circuit, both private and public, is responsible for verifying that the code loaded for a given function execution matches the expected one. This requires the following checks: + - The contract class identifier of the address called is the expected one, verified by hashing the address preimage that includes the class id. - The function identifier being executed is part of the class id, verified via a merkle membership proof. - The function code executed matches the commitment in the function identifier, verified via a merkle membership proof and a bytecode commitment proof. diff --git a/yellow-paper/docs/contracts/da.md b/yellow-paper/docs/cross-chain-communication/da.md similarity index 92% rename from yellow-paper/docs/contracts/da.md rename to yellow-paper/docs/cross-chain-communication/da.md index 938ac45e8da1..cf70f161a286 100644 --- a/yellow-paper/docs/contracts/da.md +++ b/yellow-paper/docs/cross-chain-communication/da.md @@ -13,25 +13,27 @@ Essentially Data Publication $\subset$ Data Availability, since if it is availab Progressing the state of the validating light node requires that we can convince it (and therefore the [availability oracle](./index.md#availability-oracle)) that the data was published - as it needs to compute the public inputs for the proof. The exact method of computing these public inputs can vary depending on the data layer, but generally, it would be by providing the data directly or by using data availability sampling or a data availability committee. 
-The exact mechanism greatly impacts the security and cost of the system, and will be discussed in the following sections. Before that we need to get some definitions in place. +The exact mechanism greatly impacts the security and cost of the system, and will be discussed in the following sections. Before that we need to get some definitions in place. ## Definitions :::warning **Security** -Security is often used quite in an unspecific manner, "good" security etc, without specifying what security is. From distributed systems, the *security* of a protocol or system is defined by: +Security is often used quite in an unspecific manner, "good" security etc, without specifying what security is. From distributed systems, the _security_ of a protocol or system is defined by: + - **Liveness**: Eventually something good will happen. -- **Safety**: Nothing bad will happen. -::: +- **Safety**: Nothing bad will happen. + ::: + +In the context of blockchain, this _security_ is defined by the confirmation rule, while this can be chosen individually by the user, our validating light node (L1 bridge) can be seen as a user, after all, it's "just" another node. For the case of a validity proof based blockchain, a good confirmation rule should satisfy the following sub-properties (inspired by [Sreeram's framing](https://twitter.com/sreeramkannan/status/1683735050897207296)): -In the context of blockchain, this *security* is defined by the confirmation rule, while this can be chosen individually by the user, our validating light node (L1 bridge) can be seen as a user, after all, it's "just" another node. 
For the case of a validity proof based blockchain, a good confirmation rule should satisfy the following sub-properties (inspired by [Sreeram's framing](https://twitter.com/sreeramkannan/status/1683735050897207296)): - **Liveness**: - Data Availability - The chain data must be available for anyone to reconstruct the state and build blocks - - Ledger Growth - New blocks will be appended to the ledger - - Censorship Resistance - Honest transactions that are willing to pay will be included if the chain progresses. + - Ledger Growth - New blocks will be appended to the ledger + - Censorship Resistance - Honest transactions that are willing to pay will be included if the chain progresses. - **Safety**: - - Re-org Resistance - Confirmed transactions won't be reverted + - Re-org Resistance - Confirmed transactions won't be reverted - Data Publication - The state changes of the block is published for validation check - - State Validity - State changes along with validity proof allow anyone to check that new state *ROOTS* are correct. + - State Validity - State changes along with validity proof allow anyone to check that new state _ROOTS_ are correct. Notice, that safety relies on data publication rather than availability. This might sound strange, but since the validity proof can prove that the state transition function was followed and what changes were made, we strictly don't need the entire state to be available for safety. @@ -41,30 +43,33 @@ In particular, we will be looking at what is required to give observers (nodes) ## Quick Catch-up -A rollup is broadly speaking a blockchain that put its blocks on some other chain (the host) to make them available to its nodes. Most rollups have a contract on this host blockchain which validates its state transitions (through fault proofs or validity proofs) taking the role of a full-validating light-node, increasing the accessibility of running a node on the rollup chain, making any host chain node indirectly validate its state. 
+A rollup is broadly speaking a blockchain that puts its blocks on some other chain (the host) to make them available to its nodes. Most rollups have a contract on this host blockchain which validates its state transitions (through fault proofs or validity proofs) taking the role of a full-validating light-node, increasing the accessibility of running a node on the rollup chain, making any host chain node indirectly validate its state. With its state being validated by the host chain, the security properties can eventually be enforced by the host-chain if the rollup chain itself is not progressing. Bluntly, the rollup is renting security from the host. The essential difference between a L1 and a rollup then comes down to who are required for block production (liveness) and to convince the validating light-node (security), for the L1 it is the nodes of the L1, and for the Rollup the nodes of its host (eventually). This in practice means that we can get some better properties for how easy it is to get sufficient assurance that no trickery is happening. + | |Security| Accessibility| :-----------: | :-----------: | :-----------: | Full node| 😃 | 😦 | Full-verifier light node (L1 state transitioner)| 😃 | 😃 | -With that out the way, we can draw out a model of the rollup as a two-chain system, what Jon calls the *dynamically available ledger* and the *finalized prefix ledger*. The point where we jump from one to the other depends on the confirmation rules applied. In Ethereum the *dynamically available* chain follows the [LMD-ghost](https://eth2book.info/capella/part2/consensus/lmd_ghost/) fork choice rule and is the one block builders are building on top of. Eventually consensus forms and blocks from the *dynamic* chain gets included in the *finalized* chain ([Gasper](https://eth2book.info/capella/part2/consensus/casper_ffg/)). Below image is from [Bridging and Finality: Ethereum](https://jumpcrypto.com/writing/bridging-and-finality-ethereum/). 
+With that out the way, we can draw out a model of the rollup as a two-chain system, what Jon calls the _dynamically available ledger_ and the _finalized prefix ledger_. The point where we jump from one to the other depends on the confirmation rules applied. In Ethereum the _dynamically available_ chain follows the [LMD-ghost](https://eth2book.info/capella/part2/consensus/lmd_ghost/) fork choice rule and is the one block builders are building on top of. Eventually consensus forms and blocks from the _dynamic_ chain gets included in the _finalized_ chain ([Gasper](https://eth2book.info/capella/part2/consensus/casper_ffg/)). Below image is from [Bridging and Finality: Ethereum](https://jumpcrypto.com/writing/bridging-and-finality-ethereum/). ![](https://jumpcrypto-com.ghost.io/content/images/2023/03/ZK-Bridging-4--1-.png) -In rollup land, the *available* chain will often live outside the host where it is built upon before blocks make their way onto the host DA and later get *finalized* by the the validating light node that lives on the host as a smart contract. +In rollup land, the _available_ chain will often live outside the host where it is built upon before blocks make their way onto the host DA and later get _finalized_ by the validating light node that lives on the host as a smart contract. > Depending on the rollup mechanism, rollup full nodes will be able to finalize their own view of the chain as soon as data is available on the host. Since the rollup cannot add invalid state transitions to the finalized chain due to the validating light node on the host, rollups can be built with or without a separate consensus mechanism for security. -One of the places where the existence of consensus make a difference for the rollup chain is how far you can build ahead, and who can do it. +One of the places where the existence of consensus makes a difference for the rollup chain is how far you can build ahead, and who can do it. 
### Consensus + For a consensus based rollup you can run LMD-Ghost similarly to Ethereum, new blocks are built like Ethereum, and then eventually reach the host chain where the light client should also validate the consensus rules before progressing state. In this world, you have a probability of re-orgs trending down as blocks are built upon while getting closer to the finalization. Users can then rely on their own confirmation rules to decide when they deem their transaction confirmed. You could say that the transactions are pre-confirmed until they convince the validating light-client on the host. ### No-consensus + If there is no explicit consensus for the Rollup, staking can still be utilized for leader selection, picking a distinct sequencer which will have a period to propose a block and convince the validating light-client. The user can as earlier define his own confirmation rules and could decide that if the sequencer acknowledge his transaction, then he sees it as confirmed. This can be done fully on trust, of with some signed message the user could take to the host and "slash" the sequencer for not upholding his part of the deal. :::info Fernet @@ -82,9 +87,9 @@ With that out the way, I think it would be prudent to elaborate on my definition - **Data Availability**: The data is available to me right now - **Data Publication**: The data was available for a period when it was published. - With this split, we can map the methods of which we can include data for our rollup. Below we have included only systems that are live or close to live where we have good ideas around the throughput and latency of the data. The latency is based on using Ethereum L1 as the home of the validating light node, and will therefore be the latency from being included in the data layer until statements can be included in the host chain. 
+ |Method | Publication | Availability | Quantity | Latency | Description | | ------- | :----------: | :----------: | :----------: | :-------: | :-------: | |calldata| Eth L1 | Eth L1 | $78,125~\dfrac{byte}{s}$ | None | Part of the transaction payload required to execute history, if you can sync an Ethereum node from zero, this is available. Essentially, if Ethereum lives this is available. Have to compete against everything on Ethereum for blockspace. | @@ -95,17 +100,19 @@ With this split, we can map the methods of which we can include data for our rol ### Data Layer outside host -When using a data layer that is not the host chain, cost (and safety guarantees) are reduced, and we rely on some "bridge" to tell the host chain about the data. This must happen before our validating light node can progress the block, hence the block must be published, and the host must know about it before the host can use it as input to block validation. +When using a data layer that is not the host chain, cost (and safety guarantees) are reduced, and we rely on some "bridge" to tell the host chain about the data. This must happen before our validating light node can progress the block, hence the block must be published, and the host must know about it before the host can use it as input to block validation. This influences how blocks can practically be built, since short "cycles" of publishing and then including blocks might not be possible for bridges with significant delay. This means that a suitable data layer has both sufficient data throughput but also low (enough) latency at the bridge level. 
Briefly the concerns we must have for any supported data layer that is outside the host chain is: + - What are the security assumptions of the data layer itself - What are the security assumptions of the bridge - What is the expected data throughput (kb/s) - What is the expected delay (mins) of the bridge #### Celestia + Celestia mainnet is starting with a limit of 2 mb/block with 12 second blocks supporting ~166 KB/s. But they are working on increasing this to 8 mb/block. As Celestia has just recently launched, it is unclear how much competition there will be for the data throughput, and thereby how much we could expect to get a hold of. Since the security assumptions differ greatly from the host chain (Ethereum) few L2's have been built on top of it yet, and the demand is to be gauged in the future. @@ -125,13 +132,14 @@ From their [benchmarks](https://docs.espressosys.com/sequencer/releases/doppio-t While the committee is small, it seems like they can ensure honesty through the other nodes. But the nodes active here might need a lot of bandwidth to handle both DA Proposals and VID chunks. -It is not fully clear how often blocks would be relayed to the hotshot contract for consumption by our rollup, but the team says it should be frequent. Cost is estimated to be ~400K gas. +It is not fully clear how often blocks would be relayed to the hotshot contract for consumption by our rollup, but the team says it should be frequent. Cost is estimated to be ~400K gas. ## Aztec Specific Data -As part of figuring out the data throughput requirements, we need to know what data we need to publish. In Aztec we have a bunch of data with varying importance; some being important to **everyone** and some being important to **someone**. +As part of figuring out the data throughput requirements, we need to know what data we need to publish. In Aztec we have a bunch of data with varying importance; some being important to **everyone** and some being important to **someone**. 
The things that are important to **everyone** are the things that we have directly in state, meaning the: + - leaves of the note hash tree - nullifiers - public state leafs @@ -143,11 +151,11 @@ Some of these can be moved around between layers, and others are hard-linked to We need to know what these things are to be able to progress the state. Without having the state, we don't know how the output of a state transition should look and cannot prove it. -Beyond the above data that is important to everyone, we also have data that is important to *someone*, these are the logs, both unencrypted and encrypted. Knowing the historic logs are not required to progress the chain, but are important for the users to ensure that they learn about their notes etc. +Beyond the above data that is important to everyone, we also have data that is important to _someone_, these are the logs, both unencrypted and encrypted. Knowing the historic logs are not required to progress the chain, but are important for the users to ensure that they learn about their notes etc. -A few transaction examples based on our E2E tests have the following data footprints. We will need a few more bytes to specify the sizes of these lists but it will land us in the right ball park. +A few transaction examples based on our E2E tests have the following data footprints. We will need a few more bytes to specify the sizes of these lists but it will land us in the right ball park. ->These were made back in august and are a bit outdated. They should be updated to also include more complex transactions. +> These were made back in august and are a bit outdated. They should be updated to also include more complex transactions. ``` Tx ((Everyone, Someone) bytes). 
@@ -167,21 +175,24 @@ Tx ((544, 32) bytes): comms=0, nulls=1, pubs=8, l2_to_l1=0, e_logs=8, u_logs=24 Tx ((480, 40) bytes): comms=0, nulls=1, pubs=7, l2_to_l1=0, e_logs=12, u_logs=28 Average bytes, (rounded up): -Everyone: 311 bytes +Everyone: 311 bytes Someone: 787 bytes Total: 1098 bytes ``` -For a more liberal estimation, lets suppose we emit 4 nullifiers, 4 new commitments, and 4 public data writes instead per transaction. +For a more liberal estimation, let's suppose we emit 4 nullifiers, 4 new commitments, and 4 public data writes instead per transaction. + ```python Tx ((512, 1036) bytes): comms=4, nulls=4, pubs=4, l2_to_l1=0, e_logs=988, u_logs=48 ``` + Assuming that this is a decent guess, and we can estimate the data requirements at different transaction throughput. ### Throughput Requirements Using the values from just above for transaction data requirements, we can get a ball park estimate of what we can expect to require at different throughput levels. + |Throughput | Everyone | Someone | Total | |:-----:|:-----:|:-----:|:-----:| | 1 TPS | $512 \dfrac{byte}{s}$ | $1036 \dfrac{byte}{s}$ | $1548 \dfrac{byte}{s}$ | @@ -193,6 +204,7 @@ Assuming that we are getting $\frac{1}{9}$ of the blob-space or $\frac{1}{20}$ o For every throughput column, we insert 3 marks, for everyone, someone and the total. e.g., ✅✅✅ meaning that the throughput can be supported when publishing data for everyone, someone and the total. 💀💀💀 meaning that none of it can be supported. 
+ |Space| Aztec Available | 1 TPS | 10 TPS | 50 TPS | 100 Tps | |:---:|:---:|:---:|:---:|:---:|:---:| |Calldata| $3,906 \frac{byte}{s}$ | ✅✅✅ |💀💀💀 | 💀💀💀 | 💀💀💀 @@ -200,30 +212,31 @@ For every throughput column, we insert 3 marks, for everyone, someone and the to |64 blob danksharding | $75,245 \dfrac{byte}{s}$ | ✅✅✅ | ✅✅✅ | ✅✅✅ | ✅✅💀 |Celestia (2mb/12s blocks)| $17,924 \dfrac{byte}{s}$ | ✅✅✅ | ✅✅✅ | 💀💀💀 | 💀💀💀 |Celestia (8mb/13s blocks)| $68,376 \dfrac{byte}{s}$ | ✅✅✅ | ✅✅✅ | ✅✅💀 | ✅💀💀 -|Espresso| Unclear but at least 1 mb per second | ✅✅✅ | ✅✅✅ | ✅✅✅| ✅✅✅ +|Espresso| Unclear but at least 1 mb per second | ✅✅✅ | ✅✅✅ | ✅✅✅| ✅✅✅ + > **Disclaimer**: Remember that these fractions for available space are pulled out of my ass. With these numbers at hand, we can get an estimate of our throughput in transactions based on our storage medium. ## One or multiple data layers? -From the above estimations, it is unlikely that our data requirements can be met by using only data from the host chain. It is therefore to be considered whether data can be split across more than one data layer. +From the above estimations, it is unlikely that our data requirements can be met by using only data from the host chain. It is therefore to be considered whether data can be split across more than one data layer. The main concerns when investigating if multiple layers should be supported simultaneously are: + - **Composability**: Applications should be able to integrate with one another seamlessly and synchronously. If this is not supported, they might as well be entirely separate deployments. - **Ossification**: By ossification we mean changing the assumptions of the deployments, for example, if an application was deployed at a specific data layer, changing the layer underneath it would change the security assumptions. This is addressed through the [Upgrade mechanism](../decentralisation/governance.md). 
- **Security**: Applications that depend on multiple different data layers might rely on all its layers to work to progress its state. Mainly the different parts of the application might end up with different confirmation rules (as mentioned earlier) degrading it to the least secure possibly breaking the liveness of the application if one of the layers is not progressing. -The security aspect in particular can become a problem if users deploy accounts to a bad data layer for cost savings, and then cannot access their funds (or other assets) because that data layer is not available. This can be a problem, even though all the assets of the user lives on a still functional data layer. +The security aspect in particular can become a problem if users deploy accounts to a bad data layer for cost savings, and then cannot access their funds (or other assets) because that data layer is not available. This can be a problem, even though all the assets of the user lives on a still functional data layer. Since the individual user burden is high with multi-layer approach, we discard it as a viable option, as the probability of user failure is too high. Instead, the likely design, will be that an instance has a specific data layer, and that "upgrading" to a new instance allows for a new data layer by deploying an entire instance. This ensures that composability is ensured as everything lives on the same data layer. Ossification is possible hence the [upgrade mechanism](../decentralisation/governance.md) doesn't "destroy" the old instance. This means that applications can be built to reject upgrades if they believe the new data layer is not secure enough and simple continue using the old. - ## Privacy is Data Hungry - What choices do we really have? -With the target of 10 transactions per second at launch, in which the transactions are likely to be more complex than the simple ones estimated here, some of the options simply cannot satisfy our requirements. 
+With the target of 10 transactions per second at launch, in which the transactions are likely to be more complex than the simple ones estimated here, some of the options simply cannot satisfy our requirements. For one, EIP-4844 is out of the picture, as it cannot support the data requirements for 10 TPS, neither for everyone or someone data. @@ -232,6 +245,7 @@ At Danksharding with 64 blobs, we could theoretically support 50 tps, but will n With the current target, data cannot fit on the host, and we must work to integrate with external data layers. Of these, Celestia has the current most "out-the-box" solution, but Eigen-da and other alternatives are expected to come online in the future. ## References + - https://dba.xyz/do-rollups-inherit-security/ - https://ethereum.org/en/roadmap/danksharding/ - https://eips.ethereum.org/EIPS/eip-4844 diff --git a/yellow-paper/docs/contracts/images/com-abs-6.png b/yellow-paper/docs/cross-chain-communication/images/com-abs-6.png similarity index 100% rename from yellow-paper/docs/contracts/images/com-abs-6.png rename to yellow-paper/docs/cross-chain-communication/images/com-abs-6.png diff --git a/yellow-paper/docs/contracts/index.md b/yellow-paper/docs/cross-chain-communication/index.md similarity index 82% rename from yellow-paper/docs/contracts/index.md rename to yellow-paper/docs/cross-chain-communication/index.md index f17b2733f969..aedce6ea8cc3 100644 --- a/yellow-paper/docs/contracts/index.md +++ b/yellow-paper/docs/cross-chain-communication/index.md @@ -1,17 +1,17 @@ --- title: Cross-chain communication -sidebar_position: 100 --- -This section describes what our L1 contracts do, what they are responsible for and how they interact with the circuits. +This section describes what our L1 contracts do, what they are responsible for and how they interact with the circuits. Note that the only reason that we even have any contracts is to facilitate cross-chain communication. 
The contracts are not required for the rollup to function, but required to bridge assets and to reduce the cost of light nodes. :::info Purpose of contracts The purpose of the L1 contracts are simple: + - Facilitate cross-chain communication such that L1 liquidity can be used on L2 - Act as a validating light node for L2 that every L1 node implicitly run -::: + ::: ## Overview @@ -24,17 +24,18 @@ def process(block: ProvenBlock, archive: Fr, proof: Proof): assert self.inbox.consume(block.l1_to_l2_msgs) for tx in block.body.txs: assert self.outbox.insert(tx.l2_to_l1_msgs) - + self.archive = archive ``` -While the `ProvenBlock` must be published and available for nodes to build the state of the rollup, we can build the validating light node (the contract) such that as long as the node can be *convinced* that the data is available we can progress the state. This means our light node can be built to only require a subset of the `ProvenBlock` to be published to Ethereum L1 and use a different data availability layer for most of the block body. Namely, we need the cross-chain messages to be published to L1, but the rest of the block body can be published to a different data availability layer. +While the `ProvenBlock` must be published and available for nodes to build the state of the rollup, we can build the validating light node (the contract) such that as long as the node can be _convinced_ that the data is available we can progress the state. This means our light node can be built to only require a subset of the `ProvenBlock` to be published to Ethereum L1 and use a different data availability layer for most of the block body. Namely, we need the cross-chain messages to be published to L1, but the rest of the block body can be published to a different data availability layer. :::info Validium or Rollup If a different data availability layer than Ethereum is used for the block body, we are effectively building a Validium. 
If we use Ethereum for the block body, we are building a Rollup. ::: Using the data structures defined throughout the [rollup circuits](./../rollup-circuits/index.md) section, we can outline the validating light node structure as follows: + ```mermaid classDiagram @@ -66,18 +67,18 @@ StateTransitioner --> Outbox: insert() StateTransitioner --> Verifier: verify() ``` - ### State transitioner + The state transitioner is the heart of the validating light node for the L2. The contract keeps track of the current state of the L2 and progresses this state when a valid L2 block is received. It also facilitates cross-chain communication (communication between the L1 inbox and outbox contracts). ```python class StateTransitioner: def __init__( - self, + self, verifier: Verifier, registry: Registry, - version: Fr, + version: Fr, archive: Snapshot ): self.verifier = verifier @@ -89,11 +90,11 @@ class StateTransitioner: def process( self, - header: Header, - archive: Fr, - txs_hash: Fr, - l1_to_l2_msgs: Fr[], - l2_to_l1_msgs: Fr[], + header: Header, + archive: Fr, + txs_hash: Fr, + l1_to_l2_msgs: Fr[], + l2_to_l1_msgs: Fr[], proof: Proof ): assert self.body_available(header, txs_hash, l1_to_l2_msgs, l2_to_l1_msgs) @@ -105,9 +106,9 @@ class StateTransitioner: def body_available( self, - content_hash: Fr, - txs_hash: Fr, - l1_to_l2_msgs: Fr[], + content_hash: Fr, + txs_hash: Fr, + l1_to_l2_msgs: Fr[], l2_to_l1_msgs: Fr[] ) -> bool: assert self.registry.availability_oracle.is_available(txs_hash) @@ -122,7 +123,7 @@ class StateTransitioner: ) -> bool: assert header.global_variables.block_number = self.block_number + 1 assert header.global_variables.chain_id == block.chain_id - assert header.global_variables.version == self.version + assert header.global_variables.version == self.version assert header.global_variables.timestamp < block.timestamp assert header.global_variables.timestamp > self.last_block_ts assert header.archive == self.archive @@ -130,32 +131,32 @@ class 
StateTransitioner: return True ``` - ### Availability Oracle -The state transitioner should be connected to an oracle which addresses the availability condition. -For the case of a rollup, this "oracle" will be deriving the `TxsHash` from calldata and blobs. For a validium it should be connected to a bridge that it can use to verify that the data is available on the other chain. +The state transitioner should be connected to an oracle which addresses the availability condition. + +For the case of a rollup, this "oracle" will be deriving the `TxsHash` from calldata and blobs. For a validium it should be connected to a bridge that it can use to verify that the data is available on the other chain. For a generic DA that publishes data commitments to Ethereum, the oracle could be a snark proof that opens the data commitment from the bridge and computes the `TxsHash` from it. -By having the availability oracle be independent from state progression we can even do multi-transaction blocks, e.g., use multiple transactions or commitments from other DA layers to construct the `TxsHash` for a large block. +By having the availability oracle be independent from state progression we can even do multi-transaction blocks, e.g., use multiple transactions or commitments from other DA layers to construct the `TxsHash` for a large block. For more information around the requirements we have for the availability oracle, see [Data Availability](./da.md). ### Registry -To keep one location where all the core rollup contracts can be found, we have a registry contract. The registry is a contract that holds the current and historical addresses of the core rollup contracts. The addresses of a rollup deployment are contained in a snapshot, and the registry is tracking version-snapshot pairs. Depending on the upgrade scheme, it might be used to handle upgrades, or it could entirely be removed. 
It is generally the one address that a node MUST know about, as it can then tell the node where to find the remainder of the contracts. This is for example used when looking for the address new L2 blocks should be published to. +To keep one location where all the core rollup contracts can be found, we have a registry contract. The registry is a contract that holds the current and historical addresses of the core rollup contracts. The addresses of a rollup deployment are contained in a snapshot, and the registry is tracking version-snapshot pairs. Depending on the upgrade scheme, it might be used to handle upgrades, or it could entirely be removed. It is generally the one address that a node MUST know about, as it can then tell the node where to find the remainder of the contracts. This is for example used when looking for the address new L2 blocks should be published to. ## Message Bridges -To let users communicate between L1 and the L2, we are using message bridges, namely an L1 inbox that is paired to an L2 outbox, and an L2 inbox that is paired to an L1 outbox. +To let users communicate between L1 and the L2, we are using message bridges, namely an L1 inbox that is paired to an L2 outbox, and an L2 inbox that is paired to an L1 outbox. ![Alt text](images/com-abs-6.png) -:::info Naming is based from the PoV of the state transitioner. +:::info Naming is based from the PoV of the state transitioner. ::: -While we logically have 4 boxes, we practically only require 3 of those. The L2 inbox is not real - but only logical. This is due to the fact that they are always inserted and then consumed in the same block! Insertions require a L2 transaction, and it is then to be consumed and moved to the L1 outbox by the state transitioner in the same block. +While we logically have 4 boxes, we practically only require 3 of those. The L2 inbox is not real - but only logical. This is due to the fact that they are always inserted and then consumed in the same block! 
Insertions require a L2 transaction, and it is then to be consumed and moved to the L1 outbox by the state transitioner in the same block. ### Portals @@ -210,40 +211,45 @@ We are using the `secretHash` to ensure that the user can spend the message priv ::: ### Inbox + When we say inbox, we are generally referring to the L1 contract that handles the L1 to L2 messages. The inbox is logically a [multi-set](https://en.wikipedia.org/wiki/Multiset) that builds messages based on the caller and user-provided content (multi-set meaning that repetitions are allowed). While anyone can insert messages into the inbox, only the recipient state transitioner can consume messages from it (as specified by the version). When the state transitioner is consuming a message, it MUST insert it into the "L2 outbox" ([message tree](./../state/index.md)). When a message is inserted into the inbox, the inbox **MUST** fill in the `sender`: + - `L1Actor.actor`: The sender of the message (the caller), `msg.sender` - `L1Actor.chainId`: The chainId of the L1 chain sending the message, `block.chainId` -We MUST populate these values in the inbox, since we cannot rely on the user providing anything meaningful. From the `L1ToL2Msg` we compute a hash of the message. This hash is what is moved by the state transitioner to the L2 outbox. +We MUST populate these values in the inbox, since we cannot rely on the user providing anything meaningful. From the `L1ToL2Msg` we compute a hash of the message. This hash is what is moved by the state transitioner to the L2 outbox. -Since message from L1 to L2 can be inserted independently of the L2 block, the message transfer (insert into inbox move to outbox) are not synchronous as it is for L2 to L1. This means that the message can be inserted into the inbox, but not yet moved to the outbox. The message will then be moved to the outbox when the state transitioner is consuming the message as part of a block. 
Since the sequencers are responsible for the ordering of the messages, there is not a known time for this pickup to happen, it is async. +Since messages from L1 to L2 can be inserted independently of the L2 block, the message transfer (insert into inbox move to outbox) are not synchronous as it is for L2 to L1. This means that the message can be inserted into the inbox, but not yet moved to the outbox. The message will then be moved to the outbox when the state transitioner is consuming the message as part of a block. Since the sequencers are responsible for the ordering of the messages, there is not a known time for this pickup to happen, it is async. -This is done to ensure that the messages are not used to DOS the state transitioner. If the state transitioner was forced to pick up the messages in a specific order or at a fixed rate, it could be used to DOS the state transitioner by inserting a message just before an L2 block goes through. +This is done to ensure that the messages are not used to DOS the state transitioner. If the state transitioner was forced to pick up the messages in a specific order or at a fixed rate, it could be used to DOS the state transitioner by inserting a message just before an L2 block goes through. While this can be addressed by having a queue of messages and let the sequencer specify the order, this require extra logic and might be difficult to price correctly. To keep this out of protocol, we simply allow the user to attach a fee to the message (see `fee` in `L1ToL2Msg` above). This way, the user can incentivize the sequencer to pick up the message faster. -Since it is possible to land in a case where the sequencer will never pick up the message (e.g., if it is underpriced), the sender must be able to cancel the message. To ensure that this cancellation cannot happen under the feet of the sequencer we use a `deadline`, only after the deadline can it be cancelled. 
+Since it is possible to land in a case where the sequencer will never pick up the message (e.g., if it is underpriced), the sender must be able to cancel the message. To ensure that this cancellation cannot happen under the feet of the sequencer we use a `deadline`, only after the deadline can it be cancelled. -The contract that sent the message must decide how to handle the cancellation. It could for example ignore the cancelled message, or it could refund the user. This is up to the contract to decide. +The contract that sent the message must decide how to handle the cancellation. It could for example ignore the cancelled message, or it could refund the user. This is up to the contract to decide. :::info Error handling While we have ensured that the message either arrives to the L2 outbox or is cancelled, we have not ensured that the message is consumed by the L2 contract. This is up to the L2 contract to handle. If the L2 contract does not handle the message, it will be stuck in the outbox forever. Similarly, it is up to the L1 contract to handle the cancellation. If the L1 contract does not handle the cancellation, the user might have a message that is pending forever. Error handling is entirely on the contract developer. ::: ##### L2 Inbox + While the L2 inbox is not a real contract, it is a logical contract that apply mutations to the data similar to the L1 inbox to ensure that the sender cannot fake his position. This logic is handled by the kernel and rollup circuits. Just like the L1 variant, we must populate the `sender`: + - `L2Actor.actor`: The sender of the message (the caller) - `L2Actor.version`: The version of the L2 chain sending the message In practice, this is done in the kernel circuit of the L2, and the message hash is a public output of the circuit that is inserted into the L1 outbox for later consumption. ### Outbox -The outboxes are the location where a user can consume messages from. 
An outbox can only contain elements that have previously been removed from the paired inbox. + +The outboxes are the location where a user can consume messages from. An outbox can only contain elements that have previously been removed from the paired inbox. Our L1 outbox is pretty simple, Like the L1 inbox, it is a multi-set. It should allow the state transitioner to insert messages and the recipient of the message can consume it (removing it from the outbox). @@ -252,18 +258,21 @@ When consuming a message on L1, the portal contract must check that it was sent ::: #### L2 Outbox -The L2 outbox is quite different. It is a merkle tree that is populated with the messages moved by the state transitioner. As mentioned earlier, the messages are consumed on L2 by emitting a nullifier from the application circuit. + +The L2 outbox is quite different. It is a merkle tree that is populated with the messages moved by the state transitioner. As mentioned earlier, the messages are consumed on L2 by emitting a nullifier from the application circuit. This means that all validation is done by the application circuit. The application should: + - Ensure that the message exists in the outbox (message tree) - Ensure that the message sender is the expected contract - Ensure that the message recipient is itself and that the version matches - Ensure that the user knows `secret` that hashes to the `secretHash` of the message - Compute a nullifier that includes the `secret` along with the msg hash and the index of the message in the tree - - The index is included to ensure that the nullifier is unique for each message + - The index is included to ensure that the nullifier is unique for each message ## Validity conditions -While there are multiple contracts, they work in unison to ensure that the rollup is valid and that messages are correctly moved between the chains. 
In practice this means that the contracts are to ensure that the following constraints are met in order for the validating light node to accept a block. + +While there are multiple contracts, they work in unison to ensure that the rollup is valid and that messages are correctly moved between the chains. In practice this means that the contracts are to ensure that the following constraints are met in order for the validating light node to accept a block. Note that some conditions are marked as SHOULD, which is not strictly needed for security of the rollup, but the security of the individual applications or for UX. Also, some of the conditions are repetitions of what we saw earlier from the [state transitioner](#state-transitioner). @@ -271,42 +280,44 @@ Note that some conditions are marked as SHOULD, which is not strictly needed for - **Header Validation**: See the checks from the [state transitioner](#state-transitioner) - **Proof validation**: The proof MUST be valid when validated with the header and archive. 
- **Inserting messages**: for messages that are inserted into the inboxes: - - The `sender.actor` MUST be the caller - - The `(sender|recipient).chainId` MUST be the chainId of the L1 where the state transitioner is deployed - - The `(sender|recipient).version` MUST be the version of the state transitioner (the version of the L2 specified in the L1 contract) - - The `content` MUST fit within a field element - - For L1 to L2 messages: - - The `deadline` MUST be in the future, `> block.timestamp` - - The `secretHash` MUST fit in a field element - - The caller MAY append a `fee` to incentivize the sequencer to pick up the message -- **Message Cancellation**: To remove messages from the L1 inbox: - - The message MUST exist in the inbox - - The caller MUST be `sender.actor` + - The `sender.actor` MUST be the caller + - The `(sender|recipient).chainId` MUST be the chainId of the L1 where the state transitioner is deployed + - The `(sender|recipient).version` MUST be the version of the state transitioner (the version of the L2 specified in the L1 contract) + - The `content` MUST fit within a field element + - For L1 to L2 messages: - The `deadline` MUST be in the future, `> block.timestamp` - - The `fee` SHOULD be refunded to the caller + - The `secretHash` MUST fit in a field element + - The caller MAY append a `fee` to incentivize the sequencer to pick up the message +- **Message Cancellation**: To remove messages from the L1 inbox: + - The message MUST exist in the inbox + - The caller MUST be `sender.actor` + - The `deadline` MUST be in the future, `> block.timestamp` + - The `fee` SHOULD be refunded to the caller - **Moving messages**: - - Moves MUST be atomic: - - Any message that is inserted into an outbox MUST be consumed from the matching inbox - - Any message that is consumed from an inbox MUST be inserted into the matching outbox - - Messages MUST be moved by the state transitioner whose `version` match the `version` of the message + - Moves MUST be atomic: + - 
Any message that is inserted into an outbox MUST be consumed from the matching inbox + - Any message that is consumed from an inbox MUST be inserted into the matching outbox + - Messages MUST be moved by the state transitioner whose `version` match the `version` of the message - **Consuming messages**: for messages that are consumed from the outboxes: - - L2 to L1 messages (on L1): - - The consumer (caller) MUST match the `recipient.actor` - - The consumer chainid MUST match the `recipient.chainId` - - The consumer SHOULD check the `sender` - - L1 to L2 messages (on L2): - - The consumer contract SHOULD check the `sender` details against the `portal` contract - - The consumer contract SHOULD check that the `secret` is known to the caller - - The consumer contract SHOULD check the `recipient` details against its own details - - The consumer contract SHOULD emit a nullifier to preventing double-spending - - The consumer contract SHOULD check that the message exists in the state + - L2 to L1 messages (on L1): + - The consumer (caller) MUST match the `recipient.actor` + - The consumer chainid MUST match the `recipient.chainId` + - The consumer SHOULD check the `sender` + - L1 to L2 messages (on L2): + - The consumer contract SHOULD check the `sender` details against the `portal` contract + - The consumer contract SHOULD check that the `secret` is known to the caller + - The consumer contract SHOULD check the `recipient` details against its own details + - The consumer contract SHOULD emit a nullifier to preventing double-spending + - The consumer contract SHOULD check that the message exists in the state :::info + - For cost purposes, it can be useful to commit to the public inputs to just pass a single value into the circuit. - Time constraints might change depending on the exact sequencer selection mechanism. -::: + ::: ## Logical Execution + Below, we will outline the **LOGICAL** execution of a L2 block and how the contracts interact with the circuits. 
We will be executing cross-chain communication before and after the block itself. Note that in reality, the L2 inbox does not exists, and its functionality is handled by the kernel and the rollup circuits. ```mermaid @@ -347,12 +358,12 @@ sequenceDiagram end end - loop msg in L2 inbox + loop msg in L2 inbox R2->>O2: Consume msg O2->>O2: Update state (delete) end - loop msg in l1ToL2Msgs + loop msg in l1ToL2Msgs R2->>O2: Insert msg O2->>O2: Update state (insert) end @@ -360,7 +371,7 @@ sequenceDiagram R2->>R: Block (Proof + Data) R->>R: Verify proof - R->>R: Update State + R->>R: Update State R->>Reg: Where is the Inbox? Reg->>R: Here is the address @@ -380,6 +391,7 @@ sequenceDiagram O->>O: Validate msg O->>O: Update state (delete) ``` + We will walk briefly through the steps of the diagram above. The numbering matches the numbering of nodes in the diagram, the start of the action. 1. A portal contract on L1 wants to send a message for L2 @@ -395,7 +407,7 @@ We will walk briefly through the steps of the diagram above. The numbering match 1. The L2 inbox deletes the messages from its storage 1. The L2 block includes messages from the L1 inbox that are to be inserted into the L2 outbox. 1. The L2 outbox state is updated to include the messages -1. The L2 block is submitted to L1 +1. The L2 block is submitted to L1 1. The state transitioner receives the block and verifies the proof + validate constraints on block. 1. The state transitioner updates it state to the ending state of the block 1. The state transitioner ask the registry for the L1 inbox address @@ -411,19 +423,18 @@ We will walk briefly through the steps of the diagram above. The numbering match 1. The L1 outbox updates it local state by deleting the message :::info L2 inbox is not real -As should be clear from above, the L2 inbox doesn't need to exist for itself, it keeps no state between blocks, as every message created in the block will also be consumed in the same block. 
+As should be clear from above, the L2 inbox doesn't need to exist for itself, it keeps no state between blocks, as every message created in the block will also be consumed in the same block. ::: - ## Future work + - Sequencer selection contract(s) - - Relies on the sequencer selection scheme being more explicitly defined - - Relies on being able to validate the sequencer selection scheme + - Relies on the sequencer selection scheme being more explicitly defined + - Relies on being able to validate the sequencer selection scheme - Improve public inputs hash computation - - Currently it is using calldata and blocks to be passed along with the proof, but it should be adapted to better allow other DA layers. - - Modularize the computation such that the state transitioner need not know the exact computation but merely use a separate contract as an oracle. + - Currently it is using calldata and blocks to be passed along with the proof, but it should be adapted to better allow other DA layers. + - Modularize the computation such that the state transitioner need not know the exact computation but merely use a separate contract as an oracle. 
- Governance/upgrade contract(s) - - Relies on the governance/upgrade scheme being more explicitly defined + - Relies on the governance/upgrade scheme being more explicitly defined - Forced transaction inclusion - - While we don't have an exact scheme, an outline was made in [hackmd](https://hackmd.io/@aztec-network/S1lRcMkvn?type=view) and the [forum](https://forum.aztec.network/t/forcing-transactions/606) - + - While we don't have an exact scheme, an outline was made in [hackmd](https://hackmd.io/@aztec-network/S1lRcMkvn?type=view) and the [forum](https://forum.aztec.network/t/forcing-transactions/606) diff --git a/yellow-paper/docs/cryptography/_category_.json b/yellow-paper/docs/cryptography/_category_.json deleted file mode 100644 index ee3fba3fe8dd..000000000000 --- a/yellow-paper/docs/cryptography/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Cryptography", - "position": 10, - "link": { - "type": "generated-index", - "description": "Aztec cryptography tech stack" - } -} diff --git a/yellow-paper/docs/cryptography/performance-targets.md b/yellow-paper/docs/cryptography/performance-targets.md index 4213b51486f1..27a549310dea 100644 --- a/yellow-paper/docs/cryptography/performance-targets.md +++ b/yellow-paper/docs/cryptography/performance-targets.md @@ -14,13 +14,13 @@ This document is designed to definitively answer the following questions: The following is a list of the relevant properties that affect the performance of the Aztec network: -* Size of a user transaction (in kb) -* Time to generate a user transaction proof -* Memory required to generate a user transaction proof -* Time to generate an Aztec Virtual Machine proof -* Memory required to generate an Aztec Virtual Machine proof -* Time to compute a 2-to-1 rollup proof -* Memory required to compute a 2-to-1 rollup proof +- Size of a user transaction (in kb) +- Time to generate a user transaction proof +- Memory required to generate a user transaction proof +- Time to generate an Aztec 
Virtual Machine proof +- Memory required to generate an Aztec Virtual Machine proof +- Time to compute a 2-to-1 rollup proof +- Memory required to compute a 2-to-1 rollup proof - "MVP" = minimum standards that we can go to main-net with. +"MVP" = minimum standards that we can go to main-net with. Note: gb = gigabytes (not gigabits, gigibits or gigibytes) + | metric | how to measure | MVP (10tps) | ideal (100tps) | | --- | --- | --- | --- | | proof size | total size of a user tx incl. goblin plonk proofs | 80kb | 8kb | @@ -46,23 +47,24 @@ Note: gb = gigabytes (not gigabits, gigibits or gigibytes) | 2-to-1 rollup proving time | 1 2-to-1 rollup proof | 7.4 seconds | 0.74 seconds | | 2-to-1 rollup memory consumption | 1 2-to-1 rollup proof | 128gb | 16gb | -To come up with the above estimates, we are targetting 10 transactions per second for the MVP and 100 tps for the "ideal" case. We are assuming both block producers and rollup Provers have access to 128-core machines with 128gb of RAM. Additionally, we assume that the various process required to produce a block consume the following: +To come up with the above estimates, we are targeting 10 transactions per second for the MVP and 100 tps for the "ideal" case. We are assuming both block producers and rollup Provers have access to 128-core machines with 128gb of RAM. Additionally, we assume that the various processes required to produce a block consume the following: + | process | percent of block production time allocated to process | | --- | --- | | transaction validation | 10% | | block building (tx simulation) | 20% | | public VM proof construction time | 20% | | rollup prover time | 40% | -| UltraPlonk proof compression time | 10% | +| UltraPlonk proof compression time | 10% | These are very rough estimates that could use further evaluation and validation! ### Proof size -The MVP wishes to target a tx through put of 10 tx per second. +The MVP wishes to target a tx throughput of 10 tx per second. 
-Each Aztec node (not sequencer/prover, just a regular node that is sending transactions) needs to download `10*proof_size` bytes of data to keep track of the mempool. However, this is the *best case* scenario. +Each Aztec node (not sequencer/prover, just a regular node that is sending transactions) needs to download `10*proof_size` bytes of data to keep track of the mempool. However, this is the _best case_ scenario. More practically, the data throughput of a p2p network will be less than the bandwidth of participants due to network coordination costs. As a rough heuristic, we assume that network bandwidth will be 10% of p2p user bandwidth. @@ -91,7 +93,7 @@ To perform a private swap, the following must occur: 1. Validate the user's account contract (1 kernel call) 2. Call a swap contract (1 kernel call) 3. The swap contract will initiate `transfer` calls on two token contracts (2 kernel calls) -4. A fee must be paid via our fee abstraction spec (1 kernel call) +4. A fee must be paid via our fee abstraction spec (1 kernel call) 5. A final "cleanup" proof is generated that evaluates state reads and processes the queues that have been constructed by previous kernel circuits (1 kernel call + 1 function call; the cleanup proof) In total we have 6 kernel calls and 6 function calls. @@ -105,7 +107,7 @@ Defining the first function to cost $2^{19}$ constraints is a conservative assum #### Summary of what we are measuring to capture Prover time -1. A mock kernel circuit has a size of $2^{17}$ constraints and folds *two* Honk instances into an accumulator (the prev. kernel and the function being called) +1. A mock kernel circuit has a size of $2^{17}$ constraints and folds _two_ Honk instances into an accumulator (the prev. kernel and the function being called) 2. The Prover must prove 5 mock function circuit proofs of size $2^{17}$ and one mock function proof of size $2^{19}$ 3. 
The Prover must iteratively prove 6 mock kernel circuit proofs @@ -131,7 +133,7 @@ If the block producer has access to more than one physical machine that they can ### Memory consumption -This is *critical*. Users can tolerate slow proofs, but if Honk consumes too much memory, a user cannot make a proof at all. +This is _critical_. Users can tolerate slow proofs, but if Honk consumes too much memory, a user cannot make a proof at all. safari on iPhone will purge tabs that consume more than 1gb of RAM. The WASM memory cap is 4gb which defines the upper limit for an MVP. @@ -143,7 +145,6 @@ Not a critical metric, but the prover time + prover memory metrics are predicate Our goal is to hit main-net with a network that can support 10 transactions per second. We need to estimate how many VM computation steps will be needed per transaction to determine the required speed of the VM Prover. The following uses very conservative estimations due to the difficulty of estimating this. - An Ethereum block consists of approximately 1,000 transactions, with a block gas limit of roughly 10 million gas. Basic computational steps in the Ethereum Virtual Machine consume 3 gas. If the entire block gas limit is consumed with basic computation steps (not true but let's assume for a moment), this implies that 1,000 transactions consume 3.33 million computation steps. i.e. 10 transactions per second would require roughly 33,000 steps per second and 3,330 steps per transaction. As a conservative estimate, let us assume that every tx in a block will consume 10,000 AVM steps. @@ -161,8 +162,6 @@ If we assume that ~10 seconds is budgeted to the public kernel proof, this would 100 tps requires 1.5 seconds per proof. - - ### AVM Memory consumption A large AWS instance can consume 128Gb of memory which puts an upper limit for AVM RAM consumption. Ideally consumer-grade hardware can be used to generate AVM proofs i.e. 16 Gb. 
@@ -175,6 +174,7 @@ Note: this excludes network coordination costs, latency costs, block constructio To accomodate the above costs, we assume that we can budget 40% of block production time towards making proofs. Given these constraints, the following table describes maximum allowable proof construction times for a selection of block sizes. + | block size | number of successive 2-to-1 rollup proofs | number of parallel Prover machines required for base layer proofs | time required to construct a rollup proof | | --- | --- | --- | --- | | $1,024$ | $10$ | $512$ | 4.1s | @@ -190,4 +190,3 @@ Supporting a proof construction time of 4.1s would enable us to reduce minimum h ### 2-to-1 rollup memory consumption Same rationale as the public VM proof construction time. - diff --git a/yellow-paper/docs/cryptography/protocol-overview.md b/yellow-paper/docs/cryptography/protocol-overview.md index c2681cc9b10a..565da914b179 100644 --- a/yellow-paper/docs/cryptography/protocol-overview.md +++ b/yellow-paper/docs/cryptography/protocol-overview.md @@ -44,7 +44,7 @@ Verification gas costs are lower for UltraPlonk vs Honk due to the following fac 1. Fewer precomputed selector polynomials, reducing Verifier G1 scalar multiplications 2. UltraPlonk does not use multilinear polynomials, which removes 1 pairing from the Verifier, as well as O(logn) G1 scalar multiplications. -The following sections list the protocol components required to implement client-side IVC. We make heavy use of folding schemes to build an IVC scheme. A folding scheme enables instances of a relation to be folded into a single instance of the original relation, but in a "relaxed" form. Depending on the scheme, restrictions may be placed on the instances that can be folded. +The following sections list the protocol components required to implement client-side IVC. We make heavy use of folding schemes to build an IVC scheme. 
A folding scheme enables instances of a relation to be folded into a single instance of the original relation, but in a "relaxed" form. Depending on the scheme, restrictions may be placed on the instances that can be folded. The main two families of folding schemes are derived from the [Nova](https://eprint.iacr.org/2021/370) protocol and the [Protostar](https://eprint.iacr.org/2023/620) protocol respectively. @@ -62,7 +62,7 @@ The "Fold" Prover/Verifier validates that `k` instances of a defined relation (i #### Protogalaxy Decider -The "Decider" Prover/Verifier validate whether an accumulator instance correctly satisfies the accumulator relation. The accumulator being satisfiable inductively shows that all instances that have been folded were satisfied as well. (additional protocol checks are required to reason about *which* instances have been folded into the accumulator. See the [IVC specification](https://hackmd.io/h0yTcOHiQWeeTXnxTQhTNQ?view) for more information. (note to zac: put this in the yellow paper!) +The "Decider" Prover/Verifier validate whether an accumulator instance correctly satisfies the accumulator relation. The accumulator being satisfiable inductively shows that all instances that have been folded were satisfied as well. (additional protocol checks are required to reason about _which_ instances have been folded into the accumulator. See the [IVC specification](https://hackmd.io/h0yTcOHiQWeeTXnxTQhTNQ?view) for more information. (note to zac: put this in the yellow paper!) ## Goblin Plonk @@ -78,11 +78,11 @@ This subprotocol aggregates deferred computations from two independent instances #### Elliptic Curve Virtual Machine (ECCVM) Subprotocol -The ECCVM is a Honk circuit with a custom circuit arithmetisation, designed to optimally evaluate elliptic curve arithmetic computations that have been deferred. It is defined over the Grumpkin elliptic curve. 
+The ECCVM is a Honk circuit with a custom circuit arithmetisation, designed to optimally evaluate elliptic curve arithmetic computations that have been deferred. It is defined over the Grumpkin elliptic curve. #### Translator Subprotocol -The Translator is a Honk circuit, defined over BN254, with a custom circuit arithmetisation, designed to validate that the input commitments of an ECCVM circuit align with the delegated computations described by a Goblin Plonk transcript commitment. +The Translator is a Honk circuit, defined over BN254, with a custom circuit arithmetisation, designed to validate that the input commitments of an ECCVM circuit align with the delegated computations described by a Goblin Plonk transcript commitment. ## Plonk Data Bus @@ -94,7 +94,7 @@ The [Plonk Data Bus](https://aztecprotocol.slack.com/files/U8Q1VAX6Y/F05G2B971FY # Polynomial Commitment Schemes -The UltraPlonk, Honk, Goblin Plonk and Plonk Data Bus protocols utilize Polynomial Interactive Oracle Proofs as a core component, thus requiring the use of polynomial commitment schemes (PCS). +The UltraPlonk, Honk, Goblin Plonk and Plonk Data Bus protocols utilize Polynomial Interactive Oracle Proofs as a core component, thus requiring the use of polynomial commitment schemes (PCS). UltraPlonk and Honk utilize multilinear PCS. The Plonk Data Bus and Goblin Plonk also utilize univariate PCS. diff --git a/yellow-paper/docs/decentralisation/_category_.json b/yellow-paper/docs/decentralisation/_category_.json deleted file mode 100644 index d7a828fecdf5..000000000000 --- a/yellow-paper/docs/decentralisation/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Decentralisation", - "position": 4, - "link": { - "type": "generated-index", - "description": "Decentralisation..." 
- } -} diff --git a/yellow-paper/docs/decentralisation/block_production.md b/yellow-paper/docs/decentralization/block-production.md similarity index 94% rename from yellow-paper/docs/decentralisation/block_production.md rename to yellow-paper/docs/decentralization/block-production.md index ca37a3f70193..0a054954f016 100644 --- a/yellow-paper/docs/decentralisation/block_production.md +++ b/yellow-paper/docs/decentralization/block-production.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 1 ---- - # Aztec Block Production :::info @@ -9,9 +5,11 @@ This document aims to be the latest source of truth for the Fernet sequencer sel ::: ## Overview + This document outlines a proposal for Aztec's block production, integrating immutable smart contracts on Ethereum's mainnet (L1) to establish Aztec as a Layer-2 Ethereum network. Sequencers can register permissionlessly via Aztec's L1 contracts, entering a queue before becoming eligible for a random leader election ("Fernet"). Sequencers are free to leave, adhering to an exit queue or period. Roughly every 7-10 minutes (subject to reduction as proving and execution speeds stabilize and/or improve) sequencers create a random hash using [RANDAO](https://eth2book.info/capella/part2/building_blocks/randomness/#the-randao) and their public keys. The highest-ranking hash determines block proposal eligibility. Selected sequencers either collaborate with third-party proving services or self-prove their block. They commit to a prover's L1 address, which stakes an economic deposit. Failure to submit proofs on time results in deposit forfeiture. Once L1 contracts validate proofs and state transitions, the cycle repeats for subsequent block production (forever, and ever...). ### Full Nodes + Aztec full nodes are nodes that maintain a copy of the current state of the network. They fetch blocks from the data layer, verify and apply them to its local view of the state. 
They also participate in the [P2P network](./p2p-network.md) to disburse transactions and their proofs. Can be connected to a **PXE** which can build transaction witness using the data provided by the node (data membership proofs). :::info @@ -26,16 +24,19 @@ We should probably introduce the PXE somewhere | RAM | 32gb | 64gb | :::info Estimates + - **CPU**: Help - **Network**: 40KB for a transaction with proof (see [P2P network](./p2p-network.md#network-bandwidth)). Assuming gossiping grows the data upload/download 10x, ~400KB per tx. With 10 tx/s that's 4MB/s or 32mb/s. -- **Storage**: [~1548 bytes per transaction](./../contracts/da.md#aztec-specific-data) + tree overhead, ~ 0.4 TB per year. +- **Storage**: [~1548 bytes per transaction](./../contracts/da.md#aztec-specific-data) + tree overhead, ~ 0.4 TB per year. - **RAM**: Help -::: + ::: ### Sequencers + Aztec Sequencer's are full nodes that propose blocks, execute public functions and choose provers, within the Aztec Network. It is the actor coordinating state transitions and proof production. Aztec is currently planning on implementing a protocol called Fernet (Fair Election Randomized Natively on Ethereum trustlessly), which is permissionless and anyone can participate. Additionally, sequencers play a role participating within Aztec Governance, determining how to manage [protocol upgrades](./governance.md). #### Hardware requirements + | 🖥️ | Minimum | Recommended | | ------- | ------- | ----------- | | CPU | 16cores | 32cores | @@ -48,9 +49,11 @@ Mostly as full nodes. The network requirements might be larger since it needs to ::: ### Provers + An Aztec Prover is a full node that is producing Aztec-specific zero knowledge (zk) proofs ([rollup proofs](./../rollup-circuits/index.md)). 
The current protocol, called [Sidecar](https://forum.aztec.network/t/proposal-prover-coordination-sidecar/2428), suggests facilitating out of protocol proving, similar to out of protocol [PBS](https://ethresear.ch/t/proposer-block-builder-separation-friendly-fee-market-designs/9725). Provers in this case are fully permissonless and could be anyone - such as a vertically integrated sequencer, or a proving marketplace such as [nil](https://nil.foundation/proof-market), [gevulot](https://www.gevulot.com/), or [kalypso](https://docs.marlin.org/user-guides/kalypso/), as long as they choose to support the latest version of Aztec's proving system. #### Hardware requirements + | 🖥️ | Minimum | Recommended | | ------- | ------- | ----------- | | CPU | 16cores | 32cores | @@ -63,6 +66,7 @@ Mostly as full nodes. The compute and memory requirements might be larger since ::: ### Other types of network nodes + - [Validating Light nodes](./../contracts/index.md) - Maintain a state root and process block headers (validate proofs), but do not store the full state. - The L1 bridge is a validating light node. @@ -75,7 +79,8 @@ Mostly as full nodes. The compute and memory requirements might be larger since - In the current model, it is expected that there are standardized interfaces by which well known sequencers, i.e., those operated by well respected community members or service providers, are frequently and regularly uploading historical copies of the Aztec state to immutable and decentralized storage providers such as: IPFS, Filecoin, Arweave, etc. The specific details of such is TBD and likely to be facilitated via RFP. - Help figure it out by submitting a proposal on the [Aztec research forum](https://forum.aztec.network/)! -## Registration +## Registration + Sequencers must stake an undetermined amount of a native token on Layer-1 to join the protocol, reflecting Aztec's economic security needs. For consensus based network, they enter an _entryPeriod_ before becoming active. 
This period aims to provide stability (and predictability) to the sequencer set over short time frames which is desirable for PoS based consensus networks when progressing blocks. For non-consensus based networks such as the initial Fernet implementation, an _entryPeriod_ can be used for limiting the ability to quickly get outsized influence over governance decisions, but is not strictly necessary. :::info @@ -89,7 +94,7 @@ Future updates may introduce a registration process for Provers, possibly leadin ```mermaid sequenceDiagram -participant Anyone +participant Anyone participant Contract as Aztec L1 Contract participant Network as Aztec Network @@ -101,25 +106,26 @@ Anyone ->> Network: eligible as a sequencer ## Block production :::danger **TODO** + - The diagram needs to be updated with respect to "VRF". - In **Prover commitment** phase, it is not said what the signature is used for. I'm expecting that it is used to allow the prover to publish the message on behalf of the sequencer, but it is not made clear. - In **Backup** phase, would be useful if we add a comment on the duration - In Diagram - add a dedicated timeline from the block production's PoV - get rid of "pre-confirmed" -::: + ::: ![Governance Summary Image](./images/Aztec-Block-Production-1.png) Every staked sequencers participate in the following phases, comprising an Aztec slot: -1. **Proposal:** Sequencers generate a hash of every other sequencer's public keys and RANDAO values. They then compare and rank these, seeing if they possibly have a "high" ranking random hash. If they do, they may choose to submit a block proposal to Layer-1. The highest ranking proposal will become canonical. +1. **Proposal:** Sequencers generate a hash of every other sequencer's public keys and RANDAO values. They then compare and rank these, seeing if they possibly have a "high" ranking random hash. If they do, they may choose to submit a block proposal to Layer-1. The highest ranking proposal will become canonical. 
2. **Prover commitment:** After an off-protocol negotiation with the winning sequencer, a prover submits a commitment to a particular Ethereum address that has intentions to prove the block. This commitment includes a signature from the sequencer and an amount X of funds that get slashed if the block is not finalized. -3. **Reveal:** Sequencer uploads the block contents required for progressing the chain to whatever DA layer is decided to be implemented, e.g., Ethereum's 4844 blobs. - - It is an active area of debate and research whether or not this phase is necessary, without intentions to implement "build ahead" or the ability to propose multiple blocks prior to the previous block being finalized. A possible implementation includes a block reward that incentivizes early reveal, but does not necessarily require it - turning the ability to reveal the block's data into another form of potential timing game. +3. **Reveal:** Sequencer uploads the block contents required for progressing the chain to whatever DA layer is decided to be implemented, e.g., Ethereum's 4844 blobs. + - It is an active area of debate and research whether or not this phase is necessary, without intentions to implement "build ahead" or the ability to propose multiple blocks prior to the previous block being finalized. A possible implementation includes a block reward that incentivizes early reveal, but does not necessarily require it - turning the ability to reveal the block's data into another form of potential timing game. 4. **Proving:** The prover or prover network coordinates out of protocol to build the [recursive proof tree](./../rollup-circuits/index.md). After getting to the last, singular proof that reflects the entire blocks's state transitions they then upload the proof of the block to the L1 smart contracts. -5. 
**Finalization:** The smart contracts verify the block's proof, which triggers payouts to sequencer and prover, and the address which submits the proofs (likely the prover, but could be anyone such as a relay). Once finalized, the cycle continues! - - For data layers that is not on the host, the host must have learned of the publication from the **Reveal** before the **Finalization** can begin. +5. **Finalization:** The smart contracts verify the block's proof, which triggers payouts to sequencer and prover, and the address which submits the proofs (likely the prover, but could be anyone such as a relay). Once finalized, the cycle continues! + - For data layers that is not on the host, the host must have learned of the publication from the **Reveal** before the **Finalization** can begin. 6. **Backup:** Should no prover commitment be put down, or should the block not get finalized, then an additional phase is opened where anyone can submit a block with its proof, in a based-rollup mode. In the backup phase, the first rollup verified will become canonical. ```mermaid @@ -144,15 +150,16 @@ loop Happy Path Block Production Provers ->> Contract: submits proofs Contract --> Contract: validates proofs and state transition Note right of Contract: "block confirmed!" -end +end ``` ### Constraining Randao + The `RANDAO` values used in the score as part of the Proposal phase must be constrained by the L1 contract to ensure that the computation is stable throughout a block. This is to prevent a sequencer from proposing the same L2 block at multiple L1 blocks to increase their probability of being chosen. Furthermore, we wish to constrain the `RANDAO` ahead of time, such that sequencers will know whether they need to produce blocks or not. This is to ensure that the sequencer can ramp up their hardware in time for the block production. -As only the last `RANDAO` value is available to Ethereum contracts we cannot simply read an old value. 
Instead, we must compute update it as storage in the contract. +As only the last `RANDAO` value is available to Ethereum contracts we cannot simply read an old value. Instead, we must compute and store updated values in the contract. The simplest way to do so is by storing the `RANDAO` at every block, and then use the `RANDAO` for block number - $n$ when computing the score for block number $n$. For the first $n$ blocks, the value could pre-defined. @@ -160,8 +167,8 @@ The simplest way to do so is by storing the `RANDAO` at every block, and then us Updating the `RANDAO` values used is a potential attack vector since it can be biased. By delaying blocks by an L1 block, it is possible to change the `RANDAO` value stored. Consider how big this issue is, and whether it is required to mitigate it. ::: - ## Exiting + In order to leave the protocol sequencers can exit via another L1 transaction. After signaling their desire to exit, they will no longer be considered `active` and move to an `exiting` status. When a sequencer move to `exiting`, they might have to await for an additional delay before they can exit. This delay is up to the instance itself, and is dependent on whether consensus is used or not and what internal governance the instance supports. Beware that this delay is not the same as the exit delay in [Governance](./governance.md). @@ -175,7 +182,7 @@ When a sequencer move to `exiting`, they might have to await for an additional d ```mermaid sequenceDiagram -participant Anyone as Sequencer +participant Anyone as Sequencer participant Contract as Aztec L1 Contract participant Network as Aztec Network @@ -186,17 +193,18 @@ Anyone ->> Network: exit successful, stake unlocked ``` ## Confirmation rules -There are various stages in the block production lifecycle that a user and/or application developer can gain insights into where their transaction is, and when it is considered confirmed. 
-Notably there are no consistent, industry wide definitions for confirmation rules. Articulated here is an initial proposal for what the Aztec community could align on in order to best set expectations and built towards a consistent set of user experiences/interactions. Alternative suggestions encouraged! +There are various stages in the block production lifecycle that a user and/or application developer can gain insights into where their transaction is, and when it is considered confirmed. + +Notably there are no consistent, industry wide definitions for confirmation rules. Articulated here is an initial proposal for what the Aztec community could align on in order to best set expectations and built towards a consistent set of user experiences/interactions. Alternative suggestions encouraged! Below, we outline the stages of confirmation. 1. Executed locally 2. Submitted to the network - - users no longer need to actively do anything + - users no longer need to actively do anything 3. In the highest ranking proposed block -4. In the highest ranking proposed block, with a valid prover commitment +4. In the highest ranking proposed block, with a valid prover commitment 5. In the highest ranking proposed block with effects available on the DA Layer 6. In a proven block that has been verified / validated by the L1 rollup contracts 7. In a proven block that has been finalized on the L1 @@ -240,7 +248,8 @@ journey Tx in block finalized on L1: 7: Sequencer ``` -## Economics +## Economics + In the current Aztec model, it's expected that block rewards in the native token are allocated to the sequencer, the prover, and the entity submitting the rollup to L1 for verification. Sequencers retain the block's fees and MEV (Maximal Extractable Value). A potential addition in consideration is the implementation of MEV or fee burn. The ratio of the distribution is to be determined, via modeling and simulation. 
Future Aztec versions will receive rewards based on their staked amount, as determined by the Aztec governance and [incentives contracts](./governance.md). This ensures that early versions remain eligible for rewards, provided they have active stake. Changes to the network's economic structure, especially those affecting block production and sequencer burden, require thorough consideration due to the network's upgrade and governance model relying on an honest majority assumption and at a credibly neutral sequencer set for "easy" proposals. @@ -250,15 +259,19 @@ With the rest of the protocol _mostly_ well defined, Aztec Labs now expects to b ::: ## Mev-boost + :::success + ##### About MEV on Aztec -Within the Aztec Network, "MEV" (Maximal Extractable Value) can be considered "mitigated", compared to "public" blockchains where all transaction contents and their resulting state transitions are public. In Aztec's case, MEV is _generally_ only applicable to [public functions](#) and those transactions that touch publicly viewable state. + +Within the Aztec Network, "MEV" (Maximal Extractable Value) can be considered "mitigated", compared to "public" blockchains where all transaction contents and their resulting state transitions are public. In Aztec's case, MEV is _generally_ only applicable to [public functions](#) and those transactions that touch publicly viewable state. ::: -It is expected that any Aztec sequencer client software will initially ship with some form of first price or priority gas auction for transaction ordering. Meaning that in general, transactions paying higher fees will get included earlier in the network's transaction history. Similar to Layer-1, eventually an opt-in, open source implementation of "out of protocol proposer builder separation" (PBS) such as [mev-boost](https://boost.flashbots.net/) will likely emerge within the community, giving sequencers an easier to access way to earn more money during their periods as sequencers. 
This is an active area of research. +It is expected that any Aztec sequencer client software will initially ship with some form of first price or priority gas auction for transaction ordering. Meaning that in general, transactions paying higher fees will get included earlier in the network's transaction history. Similar to Layer-1, eventually an opt-in, open source implementation of "out of protocol proposer builder separation" (PBS) such as [mev-boost](https://boost.flashbots.net/) will likely emerge within the community, giving sequencers an easier to access way to earn more money during their periods as sequencers. This is an active area of research. ## Proof-boost -It is likely that this proving ecosystem will emerge around a [flashbots mev-boost][https://boost.flashbots.net/] like ecosystem, specifically tailored towards the needs of sequencers negotiating the cost for a specific proof or set of proofs. Currently referred to as `proof-boost` or `goblin-boost` (due to goblin plonk..). + +It is likely that this proving ecosystem will emerge around a [flashbots mev-boost][https://boost.flashbots.net/] like ecosystem, specifically tailored towards the needs of sequencers negotiating the cost for a specific proof or set of proofs. Currently referred to as `proof-boost` or `goblin-boost` (due to goblin plonk..). Specifically, Proof boost is expected to be open source software sequencers can optionally run alongside their clients that will facilitate a negotiation for the rights to prove this block, therefore earning block rewards in the form of the native protocol token. After the negotiation, the sequencer will commit to an address, and that address will need to put up an economic commitment (deposit) that will be slashed in the event that the block's proofs are not produced within the alloted timeframe. 
@@ -271,13 +284,13 @@ Initially it's expected that the negotiations and commitment could be facilitate #### Happy path :::danger TODO -I'm not fully understanding the different groups, is the aztec network just the node software or 👀? Maybe coloring is nice to mark what is contracts and entities or groups of entities. Otherwise seems quite nice. +I'm not fully understanding the different groups, is the aztec network just the node software or 👀? Maybe coloring is nice to mark what is contracts and entities or groups of entities. Otherwise seems quite nice. ::: ```mermaid sequenceDiagram -participant Anyone +participant Anyone participant Contract as Aztec L1 Contract participant Network as Aztec Network participant Sequencers @@ -300,9 +313,9 @@ loop Happy Path Block Production Provers ->> Contract: submits proofs Contract --> Contract: validates proofs and state transition Note right of Contract: "block confirmed!" -end +end Sequencers ->> Contract: exit() -Sequencers --> Sequencers: wait 7 days +Sequencers --> Sequencers: wait 7 days ``` #### Voting on upgrades @@ -331,16 +344,17 @@ loop Happy Path Block Production Provers ->> Contract: submits proofs Contract --> Contract: validates proofs and state transition Note right of Contract: "block confirmed! votes counted for upgrade!" -end +end ``` #### Backup mode -In the event that no one submits a valid block proposal, we introduce a "backup" mode which enables a first come first serve race to submit the first proof to the L1 smart contracts. + +In the event that no one submits a valid block proposal, we introduce a "backup" mode which enables a first come first serve race to submit the first proof to the L1 smart contracts. ```mermaid sequenceDiagram -participant Anyone +participant Anyone participant Contract as Aztec L1 Contract participant Network as Aztec Network participant Sequencers @@ -353,19 +367,19 @@ loop Happy Path Block Production Anyone ->> Contract: submits a rollup... 
Contract --> Contract: validates proofs and state transition Note right of Contract: "block confirmed!" -end +end ``` :::danger -There is an outstanding concern that this may result in L1 censorship. L1 builders may choose to not allow block proposals to land on the L1 contracts within a sufficient amount of time, triggering "backup" mode - where they could have a block pre-built and proven, waiting L1 submission at their leisure. This scenario requires some careful consideration and modeling. A known and potential mitigiation includes a longer proposal phase, with a relatively long upper bounds to submit a proposal. Given that all sequencers are able to participate, it's effectively a "priority ranked race" within some form of ["timing game"](https://ethresear.ch/t/timing-games-implications-and-possible-mitigations/17612). +There is an outstanding concern that this may result in L1 censorship. L1 builders may choose to not allow block proposals to land on the L1 contracts within a sufficient amount of time, triggering "backup" mode - where they could have a block pre-built and proven, waiting L1 submission at their leisure. This scenario requires some careful consideration and modeling. A known and potential mitigation includes a longer proposal phase, with a relatively long upper bound to submit a proposal. Given that all sequencers are able to participate, it's effectively a "priority ranked race" within some form of ["timing game"](https://ethresear.ch/t/timing-games-implications-and-possible-mitigations/17612). ::: -We also introduce a similar backup mode in the event that there is a valid proposal, but no valid prover commitment (deposit) by the end of the prover commitment phase. +We also introduce a similar backup mode in the event that there is a valid proposal, but no valid prover commitment (deposit) by the end of the prover commitment phase. 
```mermaid sequenceDiagram -participant Anyone +participant Anyone participant Contract as Aztec L1 Contract participant Network as Aztec Network participant Sequencers @@ -382,11 +396,11 @@ loop Happy Path Block Production Anyone ->> Contract: submits a rollup... Contract --> Contract: validates proofs and state transition Note right of Contract: "block confirmed!" -end +end ``` - ## Glossary + :::danger TO DO - define the things -::: \ No newline at end of file +::: diff --git a/yellow-paper/docs/decentralisation/governance.md b/yellow-paper/docs/decentralization/governance.md similarity index 94% rename from yellow-paper/docs/decentralisation/governance.md rename to yellow-paper/docs/decentralization/governance.md index ba8fa3d06e2c..ba06141990c7 100644 --- a/yellow-paper/docs/decentralisation/governance.md +++ b/yellow-paper/docs/decentralization/governance.md @@ -1,83 +1,88 @@ ---- -sidebar_position: 0 ---- # Governance & Upgrades :::danger -This is a first draft which articulates the latest thinking on governance & upgrades. It is subject to change and further review - ultimately needing team-wide understanding and approval. Please take this as a proposal, not as truth. +This is a first draft which articulates the latest thinking on governance & upgrades. It is subject to change and further review - ultimately needing team-wide understanding and approval. Please take this as a proposal, not as truth. ::: ### Summary -We propose an immutable governance & upgrade mechanism for The Aztec Network ("Aztec") that is comprised of a version registry, which points to deployments ("instances", used interchangeably) of Aztec. -These instances may choose to be immutable themselves, or have governance that evolves over time alongside the community. The governance contract will keep track of governance votes, from the current version of Aztec, as well as direct token votes from the community, in order to provide some form of checks and balances. 
+We propose an immutable governance & upgrade mechanism for The Aztec Network ("Aztec") that is comprised of a version registry, which points to deployments ("instances", used interchangeably) of Aztec. + +These instances may choose to be immutable themselves, or have governance that evolves over time alongside the community. The governance contract will keep track of governance votes, from the current version of Aztec, as well as direct token votes from the community, in order to provide some form of checks and balances. The version registry will keep track of all historical versions of Aztec & provide them with incentives proportionate to their current stake. Additionally the governance contract will point to what the _current canonical_ version of Aztec is, particularly relevant for 3rd parties to follow, such as centralized exchanges, or portals that wish to follow Aztec governance. -![Governance Summary Image](../decentralisation/images/Aztec-Governance-Summary-1.png) +![Governance Summary Image](./images/Aztec-Governance-Summary-1.png) ### Rewards + We propose introducing a governance "version registry" which keeps track of a) which deployments of Aztec have been canonical, and b) which instances currently have tokens staked to them, specifically in order to issue a consistent, single new token in the form of _incentives_ or "rollup/block rewards". -![Rewards Summary Image](../decentralisation/images/Aztec-Governance-Summary-2.png) +![Rewards Summary Image](./images/Aztec-Governance-Summary-2.png) Given that deployments may be immutable, it is necessary to ensure that there are operators, i.e., sequencers & provers, running the infrastructure for a given deployment as long as users are interested in it. Therefore we suggest a model where all previous canonical instances of Aztec are rewarded pro-rata to their current proportion of stake. 
-Beyond making it easier to understand for users, having a single token across all deployments is necessary to ensure that all instances are all utilizing the same token due to ecosystem cohesive and business development efforts, for example, having reliable onramps and wallets. +Beyond making it easier to understand for users, having a single token across all deployments is necessary to ensure that all instances are all utilizing the same token due to ecosystem cohesion and business development efforts, for example, having reliable onramps and wallets. ### Initial deployment -Upon initial deployment, there will be an immutable set of governance contracts which maintain the version registry, and an initial immutable instance of the rollup which will be the first "canonical" deployment. + +Upon initial deployment, there will be an immutable set of governance contracts which maintain the version registry, and an initial immutable instance of the rollup which will be the first "canonical" deployment. The initial instance will be called "Aztec v0" and (the current thinking is that v0) will not include the ability to process user transactions. Sequencers can register for Fernet's sequencer selection algorithm by staking tokens to that particular instance, and practice proposing blocks on mainnet prior to deciding to "go live" with v1, which _does_ enable the processing of user transactions. This instance would then _"restake"_ these tokens within the governance contract, to have a voting weight equal to the amount of tokens staked by it's sequencer set. This is in order to ensure that the sequencer selection algorithm is working properly and the community of operators themselves can decide what happens to the network next, i.e., if it's ready to actually "go live" with transactions. It will also serve as a production readiness test of the upgradeability. 
In the event that these v0 tests are unable to be successfully completed as expected, the community (with potential foundation approval) may need to redeploy and try again. -![Initial Deployment Image](../decentralisation/images/Aztec-Governance-Summary-3.png) +![Initial Deployment Image](./images/Aztec-Governance-Summary-3.png) The ability to upgrade to v1 is articulated below, and should follow a "happy path" upgrade where a majority of the v0 sequencer set must agree to upgrade by voting during their block proposals, similar to what was articulated in [the empire stakes back](https://forum.aztec.network/t/upgrade-proposal-the-empire-stakes-back/626). Additionally, token holders can directly participate in the vote, or choose to delegate a vote with the weight of their tokens to another address, including the v0 rollup. -![Version 1 Deployment Image](../decentralisation/images/Aztec-Governance-Summary-4.png) +![Version 1 Deployment Image](./images/Aztec-Governance-Summary-4.png) ### Proposing a new version + The current canonical rollup ("current rollup") can at any point propose voting on a new instance to become canonical and added to the governance version registry contracts. It can have it's own logic for determining when it makes sense to do so, and trigger the formal governance vote. In the initial deployment it's expected to be done as articulated in the empire stakes back, where a sequencer must flag a desire to upgrade signal as part of Fernet's proposal phase, i.e., they won a random leader election, and a majority of sequencers must do so over a specific time horizon, e.g., 7 days. In addition to the current rollup implementation deciding to propose a vote, token holders can lock a sufficient amount of tokens for a sufficient amount of time in order to bypass the current rollup and propose a new version to become canonical next. 
This can be used in the scenario that the rollup implementation is so buggy it is unable to propose a new rollup to replace itself, or is due to potential community disagreement. In this scenario of disagreement, it is likely to be a very contentious action - as it implies a large token holder actively disagrees with the current rollup's sequencer set. + - Current thinking is this would require locking 1% of _total supply_ for 2 years. - These tokens must be eligible for voting, as defined below. -In a worst case scenario, the rollup's sequencer set could be malicious and censor potentially honest upgrade proposals from going through. In this scenario, there needs to be the ability to add a proposal "to the queue" via the token locking mechanism articulated above which is guaranteed to be executed when the previous vote completes. +In a worst case scenario, the rollup's sequencer set could be malicious and censor potentially honest upgrade proposals from going through. In this scenario, there needs to be the ability to add a proposal "to the queue" via the token locking mechanism articulated above which is guaranteed to be executed when the previous vote completes. ### Voting #### Participation -Aztec's governance voting occurs within the governance contract, and the tokens being utilized must be "locked within governance" i.e., non-transferable. - + +Aztec's governance voting occurs within the governance contract, and the tokens being utilized must be "locked within governance" i.e., non-transferable. + Any token holder is able to directly vote via an interaction with the governance contract. Specifically, this includes those with locked, non-circulating tokens. The current canonical rollup can choose to implement its internal voting however it would like, with the weight of the tokens staked in that instance. This is likely to be a majority of voting weight, which we can reliably assume will vote each time. 
Generally this addresses the problems of low token holder participation! In the initial instance, we envision a version of the Empire Stakes back, where sequencers are voting during part of their block proposal phases. Not all sequencers will win a block proposal/election during the time period of the vote, this leads it to being a randomized sampling of the current sequencer set. :::danger -Question: how to implement the votes? +Question: how to implement the votes? -Option 1 - The initial instance's version of the Empire Stakes back could be implemented on a per sequencer vote, where the governance contract see's and calculates each individual vote. +Option 1 - The initial instance's version of the Empire Stakes back could be implemented on a per sequencer vote, where the governance contract sees and calculates each individual vote. -Option 2 - Alternatively the voting could be implemented on a rollup wide basis, where the current rollup calculates the results & votes with the weight of the entire rollup in a singular call to the governance contract. +Option 2 - Alternatively the voting could be implemented on a rollup wide basis, where the current rollup calculates the results & votes with the weight of the entire rollup in a singular call to the governance contract. -@Lasse has some opinions +@Lasse has some opinions ::: -#### Exiting -The duration of the token lock depends on the action a user participated in. Tokens that have been locked to vote "yes" to changing the canonical instance are locked within the governance contract until the "upgrade" has been performed *or* when the voting period ends without the proposal gaining sufficient traction to reach quorum. +#### Exiting + +The duration of the token lock depends on the action a user participated in. 
Tokens that have been locked to vote "yes" to changing the canonical instance are locked within the governance contract until the "upgrade" has been performed _or_ when the voting period ends without the proposal gaining sufficient traction to reach quorum. Tokens whose power did not vote "yes" are free to leave whenever they chose. This ensures that it is always possible to "ragequit" the governance if they disagree with an upgrade, and use or exit from the instance they are using. Rollup instances themselves will need to deposit their stake into the governance, in order to earn rewards and participate within the vote. Further, they can apply their own enter/exit delays on top of the governance contract's. For example to ensure stability of the sequencer set over short timeframes, if using $AZTC stake as a requirement for sequencing, they may wish to impose longer entry and exit queues. #### Results + If the vote fails, there is no action needed. If the vote passes, and a new rollup has been determined to be the next canonical instance, it will become canonical in the amount of days defined within the vote's timelock. It is likely there are defined limitations around this parameter, e.g.,it must be a 3-30 day timelock. This is explained more in the timing section below. At this block height, portals that desire to follow governance should start referencing the new canonical instance to ensure as many bridged assets are backed on the latest version as possible. :::danger -Question: what is needed to pass a vote? +Question: what is needed to pass a vote? Current thinking is that it should likely be the amount expected to be held in the initial instance of the rollup, e.g. 20% circulating & 25% of that is staked -> 5% of total supply, so locked tokens do not need to participate whatsoever in a happy path upgrade to v1. 
@@ -90,13 +95,16 @@ Lasse: note that if the portals follow the governance registry blindly, they ass ::: ### Timing + #### Phase 1 - Setup + After the current canonical rollup, or a sufficient number of tokens are locked in governance, there is a ~3-7 day preparation period where users get their tokens "ready" for voting. i.e., withdraw & deposit/lock for the vote, or choose a suitable delegate. -#### Phase 2 - Voting -After setup has completed, there is a 7-30 day (TBD) period during which votes can land on the governance contract. In practice, we envision a majority of this voting happening in the current canonical instance and the voting weight of the current canonical instance being sufficient to reach quorum without any additional token delegation. +#### Phase 2 - Voting + +After setup has completed, there is a 7-30 day (TBD) period during which votes can land on the governance contract. In practice, we envision a majority of this voting happening in the current canonical instance and the voting weight of the current canonical instance being sufficient to reach quorum without any additional token delegation. -#### Phase 3 - Execution Delay (Timelock) +#### Phase 3 - Execution Delay (Timelock) If a vote passes, there is a timelocked period before it becomes the new canonical rollup. This specific time period must be more than a minimum, e.g., 3 days, but is defined by the current rollup and in v1 may be controlled by both the sequencers in a happy path, and an emergency security council in a worst case scenario (articulated [below](#Emergency-mode)). In a typical happy path scenario, we suggest this is at least 30 days, and in an emergency, the shortest period possible. @@ -109,9 +117,11 @@ Lasse: We need to also include a maximum value, such that you cannot brick upgra ::: ### Diagrams + Importantly we differentiate between `Aztec Governance`, and the governance of a particular instance of Aztec. 
This diagram articulates the high level of Aztec Governance, specifically how the network can deploy new versions overtime which will be part of a cohesive ecosystem, sharing a single token. In this case, we are not concerned with how the current canonical rollup chooses to implement it's decision to propose a new version, nor how it implements voting. It can be reasonably assumed that this is a version of The Empire Stakes back, where a majority of the current rollup sequencers are agreeing to propose and want to upgrade. #### Happy path + ```mermaid sequenceDiagram @@ -139,7 +149,8 @@ Sequencers ->> Next Rollup: Proposing new blocks here! ``` #### "Bricked" rollup proposals -In this diagram, we articulate the scenario in which the current canonical rollup contains bugs that result in it being unable to produce not only a block, but a vote of any kind. In this scenario, someone or a group (Lasse refers to as the "unbrick DAO") may lock 1% (specific # TBD) of total supply in order to propose a new canonical rollup. It is expected that this scenario is very unlikely, however, we believe it to be a nice set of checks and balances between the token holders and the decisions of the current rollup implementation. + +In this diagram, we articulate the scenario in which the current canonical rollup contains bugs that result in it being unable to produce not only a block, but a vote of any kind. In this scenario, someone or a group (Lasse refers to as the "unbrick DAO") may lock 1% (specific # TBD) of total supply in order to propose a new canonical rollup. It is expected that this scenario is very unlikely, however, we believe it to be a nice set of checks and balances between the token holders and the decisions of the current rollup implementation. ```mermaid sequenceDiagram @@ -164,13 +175,14 @@ Sequencers ->> Next Rollup: Proposing new blocks here! 
``` ### Vote Delegation + Any token holder can delegate their token's voting weight to another address, including the current canonical rollup's, if it wishes to follow along in that addresses' vote. The tokens being delegated will be locked, either within the governance contract or the vesting contract. :::info :bulb: Locked, non-circulating tokens can be delegated! This "economic training wheel" enables Aztec Labs, Foundation, and potential investors to participate responsibly in governance while the protocol is getting off the ground. It is TBD if these locked, non-circulating, delegated tokens will be able to earn incentives, i.e., block rewards. ::: -The diagram below articulates calling delegateTo(address) on both the governance contract and specifying a particular address. Additionally calling delegateTo() on the current canonical rollup if you wish to align with whatever voting mechanism that system currently as in place. +The diagram below articulates calling delegateTo(address) on both the governance contract and specifying a particular address. Additionally calling delegateTo() on the current canonical rollup if you wish to align with whatever voting mechanism that system currently has in place. ```mermaid sequenceDiagram @@ -185,7 +197,7 @@ Current Canonical Rollup ->> Version Registry: proposeCanonicalRollup(nextAddres Note right of Version Registry: Vote starts in N days, e.g.,7 Anyone ->> Version Registry: delegateTo(otherAddress) Anyone ->> Current Canonical Rollup: delegateTo() -Note right of Version Registry: Must be delegated before vote starts +Note right of Version Registry: Must be delegated before vote starts loop Voting loop Canonical Rollup Voting Sequencers ->> Current Canonical Rollup: canonicalVote(nextAddress, yes | no, amount) @@ -203,14 +215,17 @@ Sequencers ->> Next Rollup: Proposing new blocks here! 
``` ### Emergency mode -Emergency mode is proposed to be introduced to the initial instance "v0" or "v1" of Aztec, whatever the first instance or deployment is. Emergency mode **will not be included as part of the canonical governance contracts or registry**. If future deployments wish to have a similar security council, they can choose to do so. In this design, the current rollup can determine the timelock period as articulated above, within some predefined constraints, e.g., 3-30 days. Explicitly, the current rollup can give a security council the ability to define what this timelock period may be, and in the case of a potential vulnerability or otherwise, may be well within it's rights to choose the smallest value defined by the immutable governance contract to ensure that the network is able to recover and come back online as quickly as possible. -![Emergency Mode Image](../decentralisation/images/Aztec-Governance-Summary-4.png) +Emergency mode is proposed to be introduced to the initial instance "v0" or "v1" of Aztec, whatever the first instance or deployment is. Emergency mode **will not be included as part of the canonical governance contracts or registry**. If future deployments wish to have a similar security council, they can choose to do so. In this design, the current rollup can determine the timelock period as articulated above, within some predefined constraints, e.g., 3-30 days. Explicitly, the current rollup can give a security council the ability to define what this timelock period may be, and in the case of a potential vulnerability or otherwise, may be well within it's rights to choose the smallest value defined by the immutable governance contract to ensure that the network is able to recover and come back online as quickly as possible. 
+ +![Emergency Mode Image](./images/Aztec-Governance-Summary-4.png) #### Unpausing by default + In the first instance, it's expected that this security council can _only_ pause the rollup instance, not make any other changes to the instance's functionality. It is important that after N days (e.g.,180), or after another rollup has been marked canonical and Y days (e.g.,60), this rollup _must_ become unpaused eventually - otherwise it's practically bricked from the perspective of those users choosing immutable portals, and could leave funds or other things belonging to users (e.g., identity credentials or something wacky) permanently inside of it. The same is true for all future instances that have pause functionalities. #### Removing the emergency mode + The emergency mode articulated here may be implemented as part of the next instance of Aztec - "v1" or whatever it ends up being called, when mainnet blocks are enabled. The current sequencer set on v0 (the initial instance) would then need to vote as outlined above on marking this new deployment as the "canonical v1" or predecessor to the initial instance. This would then have all of the portal contracts follow v1, which may or may not have other [training wheels](https://discourse.aztec.network/t/aztec-upgrade-training-wheels/641). If the community wishes, they can always deploy a new instance of the rollup which removes the emergency mode and therefore the pause-only multisig. 
### Contract implementation @@ -223,4 +238,4 @@ TO DO :::danger TO DO -::: \ No newline at end of file +::: diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Block-Production-1.png b/yellow-paper/docs/decentralization/images/Aztec-Block-Production-1.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Block-Production-1.png rename to yellow-paper/docs/decentralization/images/Aztec-Block-Production-1.png diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Block-Production-2.png b/yellow-paper/docs/decentralization/images/Aztec-Block-Production-2.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Block-Production-2.png rename to yellow-paper/docs/decentralization/images/Aztec-Block-Production-2.png diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Block-Production-3.png b/yellow-paper/docs/decentralization/images/Aztec-Block-Production-3.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Block-Production-3.png rename to yellow-paper/docs/decentralization/images/Aztec-Block-Production-3.png diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-1.png b/yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-1.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-1.png rename to yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-1.png diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-2.png b/yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-2.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-2.png rename to yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-2.png diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-3.png b/yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-3.png 
similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-3.png rename to yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-3.png diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-4.png b/yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-4.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-4.png rename to yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-4.png diff --git a/yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-5.png b/yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-5.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/Aztec-Governance-Summary-5.png rename to yellow-paper/docs/decentralization/images/Aztec-Governance-Summary-5.png diff --git a/yellow-paper/docs/decentralisation/images/network.png b/yellow-paper/docs/decentralization/images/network.png similarity index 100% rename from yellow-paper/docs/decentralisation/images/network.png rename to yellow-paper/docs/decentralization/images/network.png diff --git a/yellow-paper/docs/decentralisation/p2p-network.md b/yellow-paper/docs/decentralization/p2p-network.md similarity index 85% rename from yellow-paper/docs/decentralisation/p2p-network.md rename to yellow-paper/docs/decentralization/p2p-network.md index 8ffa1179465b..addea0c233eb 100644 --- a/yellow-paper/docs/decentralisation/p2p-network.md +++ b/yellow-paper/docs/decentralization/p2p-network.md @@ -1,15 +1,7 @@ ---- -sidebar_position: 1 ---- - # P2P Network ## Requirements for a P2P Network -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - When a rollup is successfully published, the state transitions it produces are published along with it, making them publicly available. 
This broadcasted state does not depend on the Aztec network for its persistence or distribution. Transient data however, such as pending user transactions for inclusion in future rollups, does rely on the network for this. It is important that the network provides a performant, permissionless and censorship resistant mechanism for the effective propagation of these transactions to all network participants. Without this, transactions may be disadvantaged and the throughput of the network will deteriorate. Other data that may be transmitted over the network are the final rollup proofs to be submitted to the rollup contract, however the size and rate of these payloads should not make any meaningful impact on the bandwidth requirements. @@ -18,12 +10,12 @@ Other data that may be transmitted over the network are the final rollup proofs For the purpose of this discussion, we define the 'Aztec Network' as the set of components required to ensure the continual distribution of user transactions and production of rollups. 
The participants in such a network are: -* Sequencers - responsible for selecting transactions from the global pool and including them in rollups -* Provers - responsible for generating zk-proofs for the transaction and rollup circuits -* Transaction Pool Nodes - responsible for maintaining a local representation of the pending transaction pool -* Bootnodes - responsible for providing an entrypoint into the network for new participants +- Sequencers - responsible for selecting transactions from the global pool and including them in rollups +- Provers - responsible for generating zk-proofs for the transaction and rollup circuits +- Transaction Pool Nodes - responsible for maintaining a local representation of the pending transaction pool +- Bootnodes - responsible for providing an entrypoint into the network for new participants -Sequencers and Provers will likely run their own transaction pools but it is important that the ability to do so is not limited to these participants. Anyone can operate a transaction pool, providing increased privacy and censorship resistance. +Sequencers and Provers will likely run their own transaction pools but it is important that the ability to do so is not limited to these participants. Anyone can operate a transaction pool, providing increased privacy and censorship resistance. Client PXEs will not interact directly with the network but instead via instances of the Aztec Node and it's JSON RPC interface. The Aztec Node in turn will publish user transactions to the network. @@ -43,16 +35,16 @@ Transactions will need to be propagated throughout the network, to every partici Aztec Node instances will offer a JSON RPC interface for consumption by a user's PXE. Part of this API will facilitate transaction submission directly to the node which will then forward it to the network via the transaction pool. 
-![P2P Network](../decentralisation/images/network.png) +![P2P Network](./images/network.png) ### Network Bandwidth Transactions are composed of several data elements and can vary in size. The transaction size is determined largely by the private kernel proof and whether the transaction deloys any public bytecode. A typical transaction that emits a private note and an unencrypted log, makes a public call and contains a valid proof would consume approximately 40Kb of data. A transaction that additionally deploys a contract would need to transmit the public bytecode on top of this. -| Element | Size | -| ------- | ---------------- | -| Public Inputs, Public Calls and Emitted Logs | ~8Kb | -| Private Kernel Proof | ~32Kb | +| Element | Size | +| -------------------------------------------- | ----- | +| Public Inputs, Public Calls and Emitted Logs | ~8Kb | +| Private Kernel Proof | ~32Kb | If we take 2 values of transaction throughput of 10 and 100 transactions per second, we can arrive at average network bandwidth requirements of 400Kb and 4000Kb per second respectively. @@ -61,12 +53,3 @@ If we take 2 values of transaction throughput of 10 and 100 transactions per sec Proving is an out-of-protocol activity. The nature of the communication between sequencers and provers will depend entirely on the prover/s selected by the sequencer. Provers may choose to run their own Transaction Pool Node infrastructure so that they are prepared for generating proofs and don't need to receive this data out-of-band. Although proving is an out-of-protocol activity, it may be necessary for the final rollup proof to be gossiped over the P2P network such that anyone can submit it to the rollup contract. 
- - - - - - - - - diff --git a/yellow-paper/docs/gas-and-fees/_category_.json b/yellow-paper/docs/gas-and-fees/_category_.json deleted file mode 100644 index 09581cd4f5a2..000000000000 --- a/yellow-paper/docs/gas-and-fees/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Gas & Fees", - "position": 3, - "link": { - "type": "generated-index", - "description": "Gas and fees on the Aztec network..." - } -} diff --git a/yellow-paper/docs/gas-and-fees/gas-and-fees.md b/yellow-paper/docs/gas-and-fees/gas-and-fees.md index 21ee12c5abb5..53e9c6f3dad2 100644 --- a/yellow-paper/docs/gas-and-fees/gas-and-fees.md +++ b/yellow-paper/docs/gas-and-fees/gas-and-fees.md @@ -1,15 +1,7 @@ ---- -sidebar_position: 2 ---- - # Gas and Fees ## Requirements -:::info Disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: - Private state transition execution and proving is performed by the end user. However, once a transaction is submitted to the network, further resource is required to verify the private proofs, effect public state transitions and include the transaction within a rollup. This comes at the expense of the sequencer selected for the current slot. These resources include, but are not limited to: 1. Execution of public function bytecode @@ -26,10 +18,10 @@ We can define a number of requirements that serve to provide a transparent and f 2. Senders need to be assured that they will be charged fees fairly and deterministically for execution of their transaction and inclusion in a rollup. 3. Senders need to be refunded for any unused fee resulting from processing their transaction. 4. Senders need to be able to successfully submit a transaction when they have not previously used Aztec before or possess any funds on the network. -4. Sequencers need to be fairly and deterministically compensated for their expense in including transactions in a rollup. -5. 
Sequencers require agency in accepting transactions based on the fee that is being paid. -6. Sequencers need certainty that they will be paid for their effort in executing transactions, even if any public component of the transaction fails or insufficient fees are provided for this execution. -7. Sequencers need protection against grief or DOS attacks. More specifically, sequencers need to be confident that they will not be required to expend an unreasonable amount of effort before being able to reliably determine the fee endowed to a transaction. +5. Sequencers need to be fairly and deterministically compensated for their expense in including transactions in a rollup. +6. Sequencers require agency in accepting transactions based on the fee that is being paid. +7. Sequencers need certainty that they will be paid for their effort in executing transactions, even if any public component of the transaction fails or insufficient fees are provided for this execution. +8. Sequencers need protection against grief or DOS attacks. More specifically, sequencers need to be confident that they will not be required to expend an unreasonable amount of effort before being able to reliably determine the fee endowed to a transaction. ## High Level Concepts and Design @@ -61,7 +53,7 @@ Some operations are specific to a transaction, such as public function execution ### Measuring Gas Before Submission -All of the operations listed in the transaction specific table can provide us with deterministic gas values for a transaction. The transaction can be simulated and appropriate gas figures can be calculated before the transaction is sent to the network. The transaction will also need to provide a fee to cover it's portion of the amortized cost. This can be done by deciding on a value of `N`, the number of transactions in a rollup. 
Of course, the transaction sender can't know in advance how many other transactions will be included in the same rollup but the sender will be able to see how many transactions were included in prior rollups and decide on a value that will give them some certainty of inclusion without overpaying for insufficient amortization. As with all costs, any additional amortization will be refunded to the sender. +All of the operations listed in the transaction specific table can provide us with deterministic gas values for a transaction. The transaction can be simulated and appropriate gas figures can be calculated before the transaction is sent to the network. The transaction will also need to provide a fee to cover it's portion of the amortized cost. This can be done by deciding on a value of `N`, the number of transactions in a rollup. Of course, the transaction sender can't know in advance how many other transactions will be included in the same rollup but the sender will be able to see how many transactions were included in prior rollups and decide on a value that will give them some certainty of inclusion without overpaying for insufficient amortization. As with all costs, any additional amortization will be refunded to the sender. For example, if the previous 10 rollups consist of an average of 5000 transactions, the sender could decide on a value of 1000 for `N` in it's amortization. If the transaction is included in a rollup with > `N` transactions, the fee saved by the additional amortization will be refunded to the sender. If the sequencer chooses to include the transaction in a rollup with < `N` transactions, the sequencer will effectively subsidize that reduced amortization. 
@@ -140,7 +132,7 @@ With this value defined, a typical fee payment flow might look as follows: ## Transaction and Fee Lifecycle -We will attempt to walk through the process by which a transaction is created with an appropriate fee, accepted by the sequencer and the appropriate fee distribution undertaken. +We will attempt to walk through the process by which a transaction is created with an appropriate fee, accepted by the sequencer and the appropriate fee distribution undertaken. ### User Simulation and Fee Preparation @@ -159,7 +151,6 @@ This would appear to introduce a circular dependency whereby an appropriate fee - **L1FeeDistributionGas** - The amount of L1 gas the transaction is willing to pay for execution of the fee distribution function - **DAFeeDistributionGas** - The amount of DA gas the transaction is willing to pay for execution of the fee distribution function - Simulation of the transaction will provide feedback as to it's gas consumption, this can be repeated to converge on the optimum values of fee and gas limits for the transaction. 
The private portion of the transaction will be proven via the private kernel circuit resulting in a number of fee related public inputs: - **feeCommitments** - New commitments generated as part of fee preparation @@ -167,7 +158,7 @@ Simulation of the transaction will provide feedback as to it's gas consumption, - **feePreparation** - A single public function call to be made as part of fee preparation - **feeDistribution** - A single public function call to be made as part of fee distribution - **feeEncryptedLogsHash** - The hash of encrypted logs generated by the fee payment -- **feeUnencryptedLogsHash** - The hash of unencrypted logs generated by the fee payment +- **feeUnencryptedLogsHash** - The hash of unencrypted logs generated by the fee payment - **feePerL1Gas** - The fee provided per unit of L1 gas - **feePerL2Gas** - The fee provided per unit of L2 gas - **feePerDAGas** - The fee provided per unit of DA gas @@ -179,7 +170,6 @@ Simulation of the transaction will provide feedback as to it's gas consumption, - **L2FeeDistributionGas** - The amount of L2 gas the transaction is willing to pay for execution of the fee distribution function - **DAFeeDistributionGas** - The amount of DA gas the transaction is willing to pay for execution of the fee distribution function - ### Transaction Selection and Execution Upon retrieving a transaction from the P2P network, the sequencer can check that the transaction contains a fee for an accepted asset. This may require simulation of a whitelisted public function. If this step fails or is not accepted by the sequencer then the transaction can be discarded. Assuming this is successful, the provided fee can be evaluated to see if it large enough. @@ -210,7 +200,7 @@ struct TxContext { The sequencer will need to specify the intended size of the rollup (determined as part of the sequencer selection commitment phase) and use this value to calculate gas amortization. 
These values of amortized L1 and L2 gas will be added to the `l1GasUsed` and `l2GasUsed` accumulators. These accumulators, along with `dAGasUsed` will need to accurately reflect the gas consumption of the transaction prior to public function execution including state updates produced as part of private execution. -Any enqueued public function calls can be simulated by the sequencer to obtain an accurate gas profile of their execution. This simulation will enable the sequencer to compute the number of additional state updates to be made, the number of public function calls and the L2 gas consumption of each of those calls. If any of the gas limits are breached, simulation will identify where in the execution trace this takes place and so the sequencer will only need to perform iterations of the public VM and public kernel circuits for the calls that either partially or completely succeeded. This ensures that the sequencer is not forced to execute and prove circuits for which they will not be compensated. +Any enqueued public function calls can be simulated by the sequencer to obtain an accurate gas profile of their execution. This simulation will enable the sequencer to compute the number of additional state updates to be made, the number of public function calls and the L2 gas consumption of each of those calls. If any of the gas limits are breached, simulation will identify where in the execution trace this takes place and so the sequencer will only need to perform iterations of the public VM and public kernel circuits for the calls that either partially or completely succeeded. This ensures that the sequencer is not forced to execute and prove circuits for which they will not be compensated. The public VM circuit can now be executed and proven until completion or until a gas limit is reached. Each invocation of the circuit will constrain it's reported usage of all types of gas. 
@@ -220,7 +210,7 @@ Public kernel circuit iterations will be executed for each public function call 2. Any reverts claimed by the sequencer did indeed occur. 3. After such reverts no unnecessary gas consumption took place. -Once transaction execution is complete, the sequencer will execute the fee distribution function. +Once transaction execution is complete, the sequencer will execute the fee distribution function. ### Fee Distribution @@ -264,5 +254,4 @@ This next example differs in that the refund is performed privately using partia ![Private Refund](../gas-and-fees/images/gas-and-fees/private-refund.jpg) - In both of these examples the fee is effectively escrowed as part of the private portion of fee preparation. The enqueued public function is simply an instruction to increase the balance of the payment asset held by the fee payment contract. The sequencer should be able to inspect the public call instruction, consisting of contract address, function selector and arguments and be confident that this function will not fail. Provided the logic of the fee payment contract is defined correctly, once escrowed, the fee can't be modified by the user's transaction payload. This gives the sequencer the guarantee that they will be paid for the work they perform. Finally, the fee distribution function in either of these examples can be written such that the sequencer can be confident of success. This function simply needs to take the securely escrowed fee, compute the actual fee and subsequent refund before increasing the balance of the 2 parties within the payment asset. diff --git a/yellow-paper/docs/intro.md b/yellow-paper/docs/intro.md index 61043016fd54..9f872b58b815 100644 --- a/yellow-paper/docs/intro.md +++ b/yellow-paper/docs/intro.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 1 ---- - # Editorial guidelines This "yellow paper" is a first attempt to describe the Aztec protocol in its entirety. 
@@ -25,15 +21,18 @@ The details should be sufficient for some other engineering team to implement th Some of the info we need to populate this document might have already been written in the top-level `docs/` dir of the monorepo. But the target audience is different. Reduce verbose prose. Remove monorepo code snippets (but note that simple pseudocode snippets to explain a protocol concept are fine). Don't describe components of the sandbox (that's an implementation detail and doesn't belong in this doc). ## Diagrams -To increase the probability of diagrams being up to date we encourage you to write them in `mermaid`. Mermaid is a markdown-like language for generating diagrams and is supported by Docusaurus, so it will be rendered automatically for you. + +To increase the probability of diagrams being up to date we encourage you to write them in `mermaid`. Mermaid is a markdown-like language for generating diagrams and is supported by Docusaurus, so it will be rendered automatically for you. You simply create a codeblock specifying the language as `mermaid` and write your diagram in the codeblock. For example: -```txt + +````txt ```mermaid graph LR A --> B B --> C C --> A ``` +```` ```mermaid graph LR @@ -41,11 +40,12 @@ graph LR B --> C C --> A ``` + Mermaid supports multiple types of diagrams, so finding one that suits your needs should be possible. Consult their [documentation](https://mermaid.js.org/intro/getting-started.html) or try out their [live editor](https://mermaid.live/) to see if they've got what you need. When writing class diagrams, we recommend using the `classDiagram` type and composition arrows `*--` to represent extensions. Also for the sake of readability, add all the components in the class itself, including composite types. 
For example: -```txt +````txt ```mermaid classDiagram class A{ @@ -63,6 +63,7 @@ classDiagram C *-- A: a C *-- B: b ``` +```` ```mermaid classDiagram @@ -83,10 +84,10 @@ classDiagram ``` ### Mermaid doesn't cover my case, what should I do? -If mermaid doesn't cover your case, please add both the rendered image and the source code to the documentation. Most of the tools for diagramming can export a non-rendered representation that can then be updated by other people. Please name it such that it is clear what tool was used. -This should allow us to keep the diagrams up to date, by allowing others to update them. +If mermaid doesn't cover your case, please add both the rendered image and the source code to the documentation. Most of the tools for diagramming can export a non-rendered representation that can then be updated by other people. Please name it such that it is clear what tool was used. +This should allow us to keep the diagrams up to date, by allowing others to update them. ## For each protocol feature diff --git a/yellow-paper/docs/logs/index.md b/yellow-paper/docs/logs/index.md index 2e8f5c2731ad..0df3029c11dd 100644 --- a/yellow-paper/docs/logs/index.md +++ b/yellow-paper/docs/logs/index.md @@ -5,6 +5,7 @@ title: Logs Logs on Aztec are similar to logs on Ethereum and their goal is to allow smart contracts to communicate arbitrary data to the outside world. Logs are events which are emitted during contract function execution. Aztec protocol gives users the following assurances: + 1. The logs get published, 2. log integrity (the logs are not modified once emitted), 3. address of the source contract is verified to be correct (a contract can't impersonate another one). @@ -13,9 +14,11 @@ Aztec protocol gives users the following assurances: ::: # Types + There are 2 kinds of logs in Aztec protocol: unencrypted and encrypted. ## Unencrypted + Unencrypted logs are used to communicate public information out of smart contracts. 
Unencrypted logs can be emitted from both public and private functions. @@ -24,6 +27,7 @@ Emitting unencrypted logs from private functions can be a privacy leak but we de ::: ## Encrypted + Encrypted logs can be emitted only from private functions. This is because to encrypt the log we need to get a secret and it's impossible to privately manage secrets in public domain. @@ -39,7 +43,8 @@ I (benesjan) am not up-to-date on what is the encryption end-game. ::: # Encoding + Just like on Ethereum, logs are ABI encoded. :::warning As far as I know the encoding will be happening in app circuit and won't be enforced by protocol. Should this section not be here for this reason? -::: \ No newline at end of file +::: diff --git a/yellow-paper/docs/private-message-delivery/_category_.json b/yellow-paper/docs/private-message-delivery/_category_.json deleted file mode 100644 index 35605080a4ef..000000000000 --- a/yellow-paper/docs/private-message-delivery/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Private Message Delivery", - "position": 2, - "link": { - "type": "generated-index", - "description": "Private message delivery encompasses the encryption, tagging, and broadcasting of private messages on the Aztec Network." - } -} diff --git a/yellow-paper/docs/private-message-delivery/encryption-and-decryption.md b/yellow-paper/docs/private-message-delivery/encryption-and-decryption.md index fb2ac2363bd6..e42072de18da 100644 --- a/yellow-paper/docs/private-message-delivery/encryption-and-decryption.md +++ b/yellow-paper/docs/private-message-delivery/encryption-and-decryption.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 3 ---- - # Encryption and Decryption Applications should be able to provably encrypt data for a target user, as part of private message delivery. 
As stated on the Keys section, we define three types of encrypted data, based on the sender and the recipient, from the perspective of a user: @@ -22,4 +18,4 @@ To support different kinds of encryption mechanisms, the protocol does not make ## Provable Decryption -While provable encryption is required to guarantee correct private message delivery, provable decryption is required for disclosing activity within an application. This allows auditability and compliance use cases, as well as being able to prove that a user did not execute certain actions. To support this, encryption precompiles also allow for provable decryption. \ No newline at end of file +While provable encryption is required to guarantee correct private message delivery, provable decryption is required for disclosing activity within an application. This allows auditability and compliance use cases, as well as being able to prove that a user did not execute certain actions. To support this, encryption precompiles also allow for provable decryption. diff --git a/yellow-paper/docs/private-message-delivery/index.md b/yellow-paper/docs/private-message-delivery/index.md new file mode 100644 index 000000000000..679d3e9abb5e --- /dev/null +++ b/yellow-paper/docs/private-message-delivery/index.md @@ -0,0 +1,11 @@ +--- +title: Private Message Delivery +--- + +# Private Message Delivery + +Private message delivery encompasses the encryption, tagging, and broadcasting of private messages on the Aztec Network. 
+ +import DocCardList from '@theme/DocCardList'; + + diff --git a/yellow-paper/docs/private-message-delivery/note-discovery.md b/yellow-paper/docs/private-message-delivery/note-discovery.md index 84dee4abe417..ac5776b09494 100644 --- a/yellow-paper/docs/private-message-delivery/note-discovery.md +++ b/yellow-paper/docs/private-message-delivery/note-discovery.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 2 ---- - # Note Discovery When users interact with contracts they will generate and publish encrypted notes for other network participants. In order for a user to consume notes that belong to them, they need to identify, retrieve and decrypt them. A simple, privacy-preserving approach to this would be to download all of the notes and attempt decryption. However, the total number of encrypted notes published by the network will be substantial, making it infeasible for some users to do this. Those users will want to utilize a note discovery protocol to privately identify their notes. diff --git a/yellow-paper/docs/private-message-delivery/private-message-delivery.md b/yellow-paper/docs/private-message-delivery/private-msg-delivery.md similarity index 99% rename from yellow-paper/docs/private-message-delivery/private-message-delivery.md rename to yellow-paper/docs/private-message-delivery/private-msg-delivery.md index 830edd73f69c..e82ab0f34c3d 100644 --- a/yellow-paper/docs/private-message-delivery/private-message-delivery.md +++ b/yellow-paper/docs/private-message-delivery/private-msg-delivery.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 1 ---- - # Private Message Delivery Maintaining the core tenet of privacy within the Aztec Network imposes a number of requirements related to the transfer of notes from one user to another. 
If Alice executes a function that generates a note for Bob: diff --git a/yellow-paper/docs/private-message-delivery/registry.md b/yellow-paper/docs/private-message-delivery/registry.md index 2c273e5241b6..d23a717df3c1 100644 --- a/yellow-paper/docs/private-message-delivery/registry.md +++ b/yellow-paper/docs/private-message-delivery/registry.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 4 ---- - # Registry The protocol should allow users to express their preferences in terms of encryption and note tagging mechanisms, and also provably advertise their encryption public keys. A canonical registry contract provides an application-level solution to both problems. @@ -24,17 +20,17 @@ The registry contract exposes functions for setting public keys and encryption m ``` contract Registry - + public mapping(address => { keys, precompile_address }) registry - + public fn set(keys, precompile_address) this.do_set(msg_sender, keys, precompile_address) - + public fn set_from_preimage(address, keys, precompile_address, ...address_preimage) assert address not in registry assert hash(keys, precompile_address, ...address_preimage) == address - this.set(msg_sender, keys, precompile_address) - + this.set(msg_sender, keys, precompile_address) + public fn assert_non_membership(address) assert address not in registry @@ -57,7 +53,7 @@ Note that this optimization may also be included natively into the protocol, [pe While account contracts that belong to individual users have a clear set of public keys to announce, some private contracts may be shared by a group of users, like in a multisig or an escrow contract. In these scenarios, we want all messages intended for the shared contract to actually be delivered to all participants, using the encryption method selected by each. -This can be achieved by having the registry support multiple sets of keys and precompiles for each entry. Applications can then query the registry and obtain a list of recipients, rather than a single one. 
+This can be achieved by having the registry support multiple sets of keys and precompiles for each entry. Applications can then query the registry and obtain a list of recipients, rather than a single one. The registry limits multi-recipient registrations to no more than `MAX_ENTRIES_PER_ADDRESS` to prevent abuse, since this puts an additional burden on the sender, who needs to emit the same note multiple times, increasing the cost of their transaction. @@ -77,4 +73,4 @@ contract Sample ## Discussion -See [_Addresses, keys, and sending notes (Dec 2023 edition)_](https://forum.aztec.network/t/addresses-keys-and-sending-notes-dec-2023-edition/2633) for relevant discussions on this topic. \ No newline at end of file +See [_Addresses, keys, and sending notes (Dec 2023 edition)_](https://forum.aztec.network/t/addresses-keys-and-sending-notes-dec-2023-edition/2633) for relevant discussions on this topic. diff --git a/yellow-paper/docs/private-message-delivery/send-note-guidelines.md b/yellow-paper/docs/private-message-delivery/send-note-guidelines.md index 2b48c7955e2e..73355e0acbf8 100644 --- a/yellow-paper/docs/private-message-delivery/send-note-guidelines.md +++ b/yellow-paper/docs/private-message-delivery/send-note-guidelines.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 5 ---- - # Guidelines Application contracts are in control of creating, encrypting, tagging, and broadcasting private notes to users. As such, each application is free to follow whatever scheme it prefers, choosing to override user preferences or use custom encryption and note tagging mechanisms. However, this may hinder composability, or not be compatible with existing wallet software. @@ -10,7 +6,7 @@ In order to satisfy the requirements established for private message delivery, w ## Provably Sending a Note -To provably encrypt, tag, and send a note to a recipient, applications should first check the registry. 
This ensures that the latest preferences for the recipient are honored, in case they rotated their keys. The registry should be queried via a direct storage read and not a function call, in order to save an additional recursion which incurs in extra proving time. +To provably encrypt, tag, and send a note to a recipient, applications should first check the registry. This ensures that the latest preferences for the recipient are honored, in case they rotated their keys. The registry should be queried via a direct storage read and not a function call, in order to save an additional recursion which incurs in extra proving time. If the recipient is not in the registry, then the app should allow the sender to provide the recipient's public key from the recipient's address preimage. This allows users who have never interacted with the chain to receive encrypted notes, though it requires a collaborative sender. @@ -24,11 +20,11 @@ The following pseudocode covers how to provably send a note to a recipient, give ``` fn provably_send_note(recipient, note, encryption_type) - + let block_number = context.latest_block_number let public_state_root = context.roots[block_number].public_state let storage_slot = calculate_slot(registry_address, registry_base_slot, recipient) - + let public_keys, precompile_address if storage_slot in public_state_root context.update_tx_max_valid_block_number(block_number + N) @@ -40,7 +36,7 @@ fn provably_send_note(recipient, note, encryption_type) else registry_address.assert_non_membership(recipient) return - + batch_private_delegate_call(precompile_address.encrypt_and_broadcast, { public_keys, encryption_type, recipient, note }) ``` @@ -52,7 +48,7 @@ This flexibility is useful in scenarios where the sender can be trusted to make ## Delivering Messages for Self -Applications may encrypt, tag, and broadcast messages for the same user who's initiating a transaction, using the outgoing or the incoming internal encryption key. 
This allows a user to have an on-chain backup of their private transaction history, which they can use to recover state in case they lose their private database. In this scenario, unconstrained message delivery is recommended, since the sender is incentivized to correctly encrypt message for themselves. +Applications may encrypt, tag, and broadcast messages for the same user who's initiating a transaction, using the outgoing or the incoming internal encryption key. This allows a user to have an on-chain backup of their private transaction history, which they can use to recover state in case they lose their private database. In this scenario, unconstrained message delivery is recommended, since the sender is incentivized to correctly encrypt message for themselves. Applications may also choose to query the user wallet software via an oracle call, so the wallet can decide whether to broadcast the note to self on chain based on user preferences. This allows users to save on gas costs by avoiding unnecessary note broadcasts if they rely on other backup strategies. @@ -64,4 +60,4 @@ As an alternative to registering [multiple recipients for a given address](./reg ## Discussions -See [_Addresses, keys, and sending notes (Dec 2023 edition)_](https://forum.aztec.network/t/addresses-keys-and-sending-notes-dec-2023-edition/2633) and [_Broadcasting notes in token contracts_](https://forum.aztec.network/t/broadcasting-notes-in-token-contracts/2658) for relevant discussions on this topic. \ No newline at end of file +See [_Addresses, keys, and sending notes (Dec 2023 edition)_](https://forum.aztec.network/t/addresses-keys-and-sending-notes-dec-2023-edition/2633) and [_Broadcasting notes in token contracts_](https://forum.aztec.network/t/broadcasting-notes-in-token-contracts/2658) for relevant discussions on this topic. 
diff --git a/yellow-paper/docs/public-vm/InstructionSet.mdx b/yellow-paper/docs/public-vm/InstructionSet.mdx index 141e435dd25c..ce5eb82ffc91 100644 --- a/yellow-paper/docs/public-vm/InstructionSet.mdx +++ b/yellow-paper/docs/public-vm/InstructionSet.mdx @@ -1,5 +1,5 @@ # Instruction Set -import GeneratedInstructionSet from './gen/_InstructionSet.mdx'; +import GeneratedInstructionSet from "./gen/_InstructionSet.mdx"; - \ No newline at end of file + diff --git a/yellow-paper/docs/public-vm/_category_.json b/yellow-paper/docs/public-vm/_category_.json deleted file mode 100644 index b71bfdd8d049..000000000000 --- a/yellow-paper/docs/public-vm/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "AVM: Aztec's Public VM", - "position": 5, - "link": { - "type": "generated-index", - "description": "Aztec's Public VM..." - } -} diff --git a/yellow-paper/docs/public-vm/avm.md b/yellow-paper/docs/public-vm/avm.md index d79f5898cf94..8eb47bd39b81 100644 --- a/yellow-paper/docs/public-vm/avm.md +++ b/yellow-paper/docs/public-vm/avm.md @@ -1,22 +1,17 @@ ---- -sidebar_position: 0 ---- - # Aztec Virtual Machine -:::important disclaimer -This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release. -::: :::note reference Many terms and definitions here are borrowed from the [Ethereum Yellow Paper](https://ethereum.github.io/yellowpaper/paper.pdf). ::: ## Introduction + An Aztec transaction may include one or more **public execution requests**. A public execution request represents an initial **message call** to a contract, providing input data and triggering the execution of that contract's public code in the Aztec Virtual Machine. Given a message call to a contract, the AVM executes the corresponding code one instruction at a time, treating each instruction as a transition function on its state. 
> Public execution requests may originate as [`enqueuedPublicFunctionCalls`](../calls/enqueued-calls.md) triggered during the transaction's private execution. This document contains the following sections: + - [**Public contract bytecode**](#public-contract-bytecode) (aka AVM bytecode) - [**Execution context**](#execution-context), outlining the AVM's environment and state - [**Execution**](#execution), outlining control flow, gas tracking, halting, and reverting @@ -29,6 +24,7 @@ For details on the AVM's "tagged" memory model, refer to the **["AVM Memory Mode > Note: The Aztec Virtual Machine, while designed with a SNARK implementation in mind, is not strictly tied to any particular implementation and therefore is defined without SNARK or circuit-centric verbiage. That being said, considerations for a SNARK implementation are raised or linked when particularly relevant or helpful. ## Public contract bytecode + A contract's public bytecode is a series of execution instructions for the AVM. When a message call is made to a contract, the AVM retrieves the corresponding bytecode from the world state (`worldState.contracts[address].bytecode`) and triggers execution of the first instruction (`bytecode[0]`). The world state is described in more detail later. > Note: While a Noir contract may have multiple public functions, they are inlined so that the **entirety of a contract's public code exists in a single bytecode**. Internal calls to Noir functions within the same contract are compiled to simple program-counter changes, as are internal returns. In a manner similar to the Ethereum Virtual Machine, the AVM is not itself aware of function selectors and internal function calls. The Noir compiler may implement these constructs by treating the first word in a message call's calldata as a function selector, and beginning a contract's bytecode with a series of conditional jumps. 
@@ -38,11 +34,13 @@ A contract's public bytecode is a series of execution instructions for the AVM. Refer to ["Bytecode"](/docs/bytecode) for more information. ## Execution Context + :::note REMINDER Many terms and definitions here are borrowed from the [Ethereum Yellow Paper](https://ethereum.github.io/yellowpaper/paper.pdf). ::: An **execution context** includes the information necessary to initiate AVM execution along with the state maintained by the AVM throughout execution: + ``` AVMContext { environment: ExecutionEnvironment, @@ -58,6 +56,7 @@ The first two entries, **execution environment** and **machine state**, share th > When a nested message call is made, a new environment and machine state are initialized by the caller. In other words, a nested message call has its own environment and machine state which are _partially_ derived from the caller's context. The **execution environment** is fully specified by a message call's execution agent and remains constant throughout a call's execution. + ``` ExecutionEnvironment { address, @@ -78,6 +77,7 @@ ExecutionEnvironment { ``` **Machine state** is partially specified by the execution agent, and otherwise begins as empty or uninitialized for each message call. This state is transformed on an instruction-per-instruction basis. + ``` MachineState { l1GasLeft, @@ -88,6 +88,7 @@ MachineState { ``` **World state** contains persistable VM state. If a message call succeeds, its world state updates are applied to the calling context (whether that be a parent call's context or the transaction context). If a message call fails, its world state updates are rejected by its caller. When a _transaction_ succeeds, its world state updates persist into future transactions. + ``` WorldState { publicStorage: (address, slot) => value, // read/write @@ -103,6 +104,7 @@ WorldState { > Note: each member of the world state is implemented as an independent merkle tree with different properties. 
The **accrued substate**, as coined in the [Ethereum Yellow Paper](https://ethereum.github.io/yellowpaper/paper), contains information that is accrued throughout transaction execution to be "acted upon immediately following the transaction." These are append-only arrays containing state that is not relevant to other calls or transactions. Similar to world state, if a message call succeeds, its substate is appended to its calling context, but if it fails its substate is dropped by its caller. + ``` AccruedSubstate { logs: [], // append-only @@ -111,6 +113,7 @@ AccruedSubstate { ``` Finally, when a message call halts, it sets the context's **message call results** to communicate results to the caller. + ``` MessageCallResults { reverted: boolean, @@ -119,8 +122,10 @@ MessageCallResults { ``` ### Context initialization for initial call + This section outlines AVM context initialization specifically for a **public execution request's initial message call** (_i.e._ not a nested message call). Context initialization for nested message calls will be explained [in a later section](#context-initialization-for-a-nested-call). When AVM execution is initiated for a public execution request, the AVM context is initialized as follows: + ``` context = AVMContext { environment: INITIAL_EXECUTION_ENVIRONMENT, @@ -130,9 +135,11 @@ context = AVMContext { results: INITIAL_MESSAGE_CALL_RESULTS, } ``` + > Note: Since world state persists between transactions, the latest state is injected into a new AVM context. Given a `PublicCallRequest` and its parent `TxRequest`, these above-listed "`INITIAL_*`" entries are defined as follows: + ``` INITIAL_EXECUTION_ENVIRONMENT = ExecutionEnvironment { address: PublicCallRequest.contractAddress, @@ -168,9 +175,11 @@ INITIAL_MESSAGE_CALL_RESULTS = MessageCallResults { > Note: unlike memory in the Ethereum Virtual Machine, uninitialized memory in the AVM is not readable! 
A memory cell must be written (and therefore [type-tagged](./state-model#types-and-tagged-memory)) before it can be read. ## Execution + With an initialized context (and therefore an initial program counter of 0), the AVM can execute a message call starting with the very first instruction in its bytecode. ### Program Counter and Control Flow + The program counter (machine state's `pc`) determines which instruction to execute (`instr = environment.bytecode[pc]`). Each instruction's state transition function updates the program counter in some way, which allows the VM to progress to the next instruction at each step. Most instructions simply increment the program counter by 1. This allows VM execution to flow naturally from instruction to instruction. Some instructions ([`JUMP`](./InstructionSet#isa-section-jump), [`JUMPI`](./InstructionSet#isa-section-jumpi), `INTERNALCALL`) modify the program counter based on inputs. @@ -180,28 +189,36 @@ The `INTERNALCALL` instruction jumps to the destination specified by its input ( > Jump destinations can only be constants from the contract bytecode, or destinations popped from `machineState.internalCallStack`. A jump destination will never originate from main memory. ### Gas limits and tracking + Each instruction has an associated `l1GasCost` and `l2GasCost`. Before an instruction is executed, the VM enforces that there is sufficient gas remaining via the following assertions: + ``` assert machineState.l1GasLeft - instr.l1GasCost > 0 assert machineState.l2GasLeft - instr.l2GasCost > 0 ``` + > Note: many instructions (like arithmetic operations) have 0 `l1GasCost`. Instructions only incur an L1 cost if they modify world state or accrued substate. 
If these assertions pass, the machine state's gas left is decreased prior to the instruction's core execution: + ``` machineState.l1GasLeft -= instr.l1GasCost machineState.l2GasLeft -= instr.l2GasCost ``` If either of these assertions _fail_ for an instruction, this triggers an exceptional halt. The gas left is set to 0 and execution reverts. + ``` machineState.l1GasLeft = 0 machineState.l2GasLeft = 0 ``` + > Reverting and exceptional halts will be covered in more detail [in a later section](#halting). ### Gas cost notes and examples + A instruction's gas cost is loosely derived from its complexity. Execution complexity of some instructions changes based on inputs. Here are some examples and important notes: + - [`JUMP`](./InstructionSet/#isa-section-jump) is an example of an instruction with constant gas cost. Regardless of its inputs, the instruction always incurs the same `l1GasCost` and `l2GasCost`. - The [`SET`](./InstructionSet/#isa-section-set) instruction operates on a different sized constant (based on its `dst-type`). Therefore, this instruction's gas cost increases with the size of its input. - Instructions that operate on a data range of a specified "size" scale in cost with that size. An example of this is the [`CALLDATACOPY`](./InstructionSet/#isa-section-calldatacopy) argument which copies `copySize` words from `environment.calldata` to memory. @@ -213,45 +230,52 @@ A instruction's gas cost is loosely derived from its complexity. Execution compl > Implementation detail: an instruction's gas cost takes into account the costs of associated downstream computations. So, an instruction that triggers accesses to the public data tree (`SLOAD`/`SSTORE`) incurs a cost that accounts for state access validation in later circuits (public kernel or rollup). An instruction that triggers a nested message call (`CALL`/`STATICCALL`/`DELEGATECALL`) incurs a cost accounting for the nested call's execution and an added execution of the public kernel circuit. 
## Halting + A message call's execution can end with a **normal halt** or **exceptional halt**. A halt ends execution within the current context and returns control flow to the calling context. ### Normal halting + A normal halt occurs when the VM encounters an explicit halting instruction ([`RETURN`](./InstructionSet/#isa-section-return) or [`REVERT`](./InstructionSet/#isa-section-revert)). Such instructions consume gas normally and optionally initialize some output data before finally halting execution within the current context. + ``` machineState.l1GasLeft -= instr.l1GasCost machineState.l2GasLeft -= instr.l2GasCost // results.reverted remains false results.output = machineState.memory[instr.args.retOffset:instr.args.retOffset+instr.args.retSize] ``` + > Definitions: `retOffset` and `retSize` here are arguments to the [`RETURN`](./InstructionSet/#isa-section-return) and [`REVERT`](./InstructionSet/#isa-section-revert) instructions. If `retSize` is 0, the context will have no output. Otherwise, these arguments point to a region of memory to output. > Note: `results.output` is only relevant when the caller is a message call itself. When a public execution request's initial message call halts normally, its `results.output` is ignored. ### Exceptional halting + An exceptional halt is not explicitly triggered by an instruction but instead occurs when one of the following halting conditions is met: + 1. **Insufficient gas** - ``` - assert machineState.l1GasLeft - instr.l1GasCost > 0 - assert machineState.l2GasLeft - instr.l2GasCost > 0 - ``` + ``` + assert machineState.l1GasLeft - instr.l1GasCost > 0 + assert machineState.l2GasLeft - instr.l2GasCost > 0 + ``` 1. **Invalid instruction encountered** - ``` - assert environment.bytecode[machineState.pc].opcode <= MAX_AVM_OPCODE - ``` + ``` + assert environment.bytecode[machineState.pc].opcode <= MAX_AVM_OPCODE + ``` 1. 
**Failed memory tag check** - - Defined per-instruction in the [Instruction Set](./InstructionSet) + - Defined per-instruction in the [Instruction Set](./InstructionSet) 1. **Jump destination past end of bytecode** - ``` - assert machineState.pc >= environment.bytecode.length - ``` + ``` + assert machineState.pc >= environment.bytecode.length + ``` 1. **World state modification attempt during a static call** - ``` - assert !environment.isStaticCall - OR environment.bytecode[machineState.pc].opcode not in WS_MODIFYING_OPS - ``` - > Definition: `WS_MODIFYING_OPS` represents the list of all opcodes corresponding to instructions that modify world state. + ``` + assert !environment.isStaticCall + OR environment.bytecode[machineState.pc].opcode not in WS_MODIFYING_OPS + ``` + > Definition: `WS_MODIFYING_OPS` represents the list of all opcodes corresponding to instructions that modify world state. When an exceptional halt occurs, the context is flagged as consuming all off its allocated gas and marked as `reverted` with no output data, and then execution within the current context ends. + ``` machineState.l1GasLeft = 0 machineState.l2GasLeft = 0 @@ -260,11 +284,13 @@ results.reverted = true ``` ## Nested calls -During a message call's execution, an instruction may be encountered that triggers another message call. A message call triggered in this way may be referred to as a **nested call**. The purpose of the [`CALL`](./InstructionSet/#isa-section-call), [`STATICCALL`](./InstructionSet/#isa-section-staticcall), and `DELEGATECALL` instructions is to initiate nested calls. +During a message call's execution, an instruction may be encountered that triggers another message call. A message call triggered in this way may be referred to as a **nested call**. The purpose of the [`CALL`](./InstructionSet/#isa-section-call), [`STATICCALL`](./InstructionSet/#isa-section-staticcall), and `DELEGATECALL` instructions is to initiate nested calls. 
### Context initialization for a nested call + Initiation of a nested call requires the creation of a new context (or **sub-context**). + ``` subContext = AVMContext { environment: nestedExecutionEnvironment, // defined below @@ -274,11 +300,13 @@ subContext = AVMContext { results: INITIAL_MESSAGE_CALL_RESULTS, } ``` + While some context members are initialized as empty (as they are for an initial message call), other entries are derived from the calling context or from the message call instruction's arguments (`instr.args`). The world state is forwarded as-is to the sub-context. Any updates made to the world state before this message call instruction was encountered are carried forward into the sub-context. The environment and machine state for the new sub-context are initialized as shown below. Here, the `callingContext` refers to the context in which the nested message call instruction was encountered. + ``` // some assignments reused below isStaticCall = instr.opcode == STATICCALL_OP @@ -310,15 +338,18 @@ nestedMachineState = MachineState { internalCallStack: empty, } ``` + > Note: the sub-context machine state's `l*GasLeft` is initialized based on the call instruction's `gasOffset` argument. The caller allocates some amount of L1 and L2 gas to the nested call. It does so using the instruction's `gasOffset` argument. In particular, prior to the message call instruction, the caller populates `M[gasOffset]` with the sub-context's initial `l1GasLeft`. Likewise it populates `M[gasOffset+1]` with `l2GasLeft`. > Note: recall that `INITIAL_MESSAGE_CALL_RESULTS` is the same initial value used during [context initialization for a public execution request's initial message call](#context-initialization-for-initial-call). > `STATICCALL_OP` and `DELEGATECALL_OP` refer to the 8-bit opcode values for the `STATICCALL` and `DELEGATECALL` instructions respectively. 
### Updating the calling context after nested call halts + When a message call's execution encounters an instruction that itself triggers a message call, the nested call executes until it reaches a halt. At that point, control returns to the caller, and the calling context is updated based on the sub-context and the message call instruction's transition function. The components of that transition function are defined below. The success or failure of the nested call is captured into memory at the offset specified by the call instruction's `successOffset` input: + ``` context.machineState.memory[instr.args.successOffset] = !subContext.results.reverted ``` @@ -328,21 +359,25 @@ Recall that a nested call is allocated some gas. In particular, the call instruc As detailed in [the gas section above](#gas-cost-notes-and-examples), every instruction has an associated `instr.l1GasCost` and `instr.l2GasCost`. A nested call instruction's cost is the same as its initial `l*GasLeft` and `l2GasLeft`. Prior to the nested call's execution, this cost is subtracted from the calling context's remaining gas. When a nested call completes, any of its allocated gas that remains unused is refunded to the caller. + ``` context.l1GasLeft += subContext.machineState.l1GasLeft context.l2GasLeft += subContext.machineState.l2GasLeft ``` If a nested call halts normally with a [`RETURN`](./InstructionSet/#isa-section-return) or [`REVERT`](./InstructionSet/#isa-section-revert), it may have some output data (`subContext.results.output`). The caller's `retOffset` and `retSize` arguments to the nested call instruction specify a region in memory to place output data when the nested call returns. + ``` if instr.args.retSize > 0: context.memory[instr.args.retOffset:instr.args.retOffset+instr.args.retSize] = subContext.results.output ``` As long as a nested call has not reverted, its updates to the world state and accrued substate will be absorbed into the calling context. 
+ ``` if !subContext.results.reverted AND instr.opcode != STATICCALL_OP: context.worldState = subContext.worldState context.accruedSubstate.append(subContext.accruedSubstate) ``` -> Reminder: a nested call cannot make updates to the world state or accrued substate if it is a [`STATICCALL`](./InstructionSet/#isa-section-staticcall). \ No newline at end of file + +> Reminder: a nested call cannot make updates to the world state or accrued substate if it is a [`STATICCALL`](./InstructionSet/#isa-section-staticcall). diff --git a/yellow-paper/docs/public-vm/security.md b/yellow-paper/docs/public-vm/security.md index 1ebc179d1fdd..78199f872e47 100644 --- a/yellow-paper/docs/public-vm/security.md +++ b/yellow-paper/docs/public-vm/security.md @@ -1,4 +1,4 @@ # VM threat model, security requirements An honest Prover must always be able to construct a satisfiable proof for an AVM program, even if the program throws an error. -This implies constraints produced by the AVM **must** be satisfiable. \ No newline at end of file +This implies constraints produced by the AVM **must** be satisfiable. diff --git a/yellow-paper/docs/public-vm/state-model.md b/yellow-paper/docs/public-vm/state-model.md index cc92dc820da2..aa2558aa2596 100644 --- a/yellow-paper/docs/public-vm/state-model.md +++ b/yellow-paper/docs/public-vm/state-model.md @@ -48,6 +48,7 @@ Memory addresses must be tagged to be a `u32` type. ## Types and Tagged Memory ### Terminology/legend + - `M[X]`: main memory cell at offset `X` - `tag`: a value referring to a memory cell's type (its maximum potential value) - `T[X]`: the tag associated with memory cell at offset `X` @@ -102,6 +103,7 @@ M[dstOffset] = M[aOffset] + M[bOffset] // perform the addition #### `MOV` and tag preservation The `MOV` instruction copies data from one memory cell to another, preserving tags. 
In other words, the destination cell's tag will adopt the value of the source: + ``` # MOV srcOffset dstOffset T[dstOffset] = T[srcOffset] // preserve tag @@ -132,6 +134,7 @@ M[dstOffset] = cast(M[srcOffset]) // perform cast A `MOV` instruction may flag its source and/or destination offsets as "indirect". An indirect memory access performs `M[M[offset]]` instead of the standard `M[offset]`. Memory offsets must be `u32`s since main memory is a 32-bit addressable space, and so indirect memory accesses include additional checks. Additional checks for a `MOV` with an indirect source offset: + ``` # MOV srcOffset dstOffset // with indirect source assert T[srcOffset] == u32 // enforce that `M[srcOffset]` is itself a valid memory offset @@ -140,6 +143,7 @@ M[dstOffset] = M[M[srcOffset]] // perform move from indirect source ``` Additional checks for a `MOV` with an indirect destination offset: + ``` # MOV srcOffset dstOffset // with indirect destination assert T[dstOffset] == u32 // enforce that `M[dstOffset]` is itself a valid memory offset @@ -148,6 +152,7 @@ M[M[dstOffset]] = M[srcOffset] // perform move to indirect destination ``` Additional checks for a `MOV` with both indirect source and destination offsets: + ``` # MOV srcOffset dstOffset // with indirect source and destination assert T[srcOffset] == T[dstOffset] == u32 // enforce that `M[*Offset]` are valid memory offsets @@ -158,12 +163,15 @@ M[M[dstOffset]] = M[M[srcOffset]] // perform move to indirect destinati #### Calldata/returndata and tag conversions All elements in calldata/returndata are implicitly tagged as field elements (i.e. maximum value is $p - 1$). To perform a tag conversion, calldata/returndata must be copied into main memory (via [`CALLDATACOPY`](./InstructionSet#isa-section-calldatacopy) or [`RETURN`'s `offset` and `size`](./InstructionSet#isa-section-return)), followed by an appropriate `CAST` instruction. 
+ ``` # Copy calldata to memory and cast a word to u64 CALLDATACOPY cdOffset size offsetA // copy calldata to memory at offsetA CAST offsetA dstOffset // cast first copied word to a u64 ``` + This would perform the following: + ``` # CALLDATACOPY cdOffset size offsetA T[offsetA:offsetA+size] = field // CALLDATACOPY assigns the field tag @@ -171,4 +179,4 @@ M[offsetA:offsetA+size] = calldata[cdOffset:cdOffset+size] // copy calldata to m # CAST offsetA dstOffset T[offsetA] = u64 // CAST assigns a new tag M[dstOffset] = cast(offsetA) // perform the cast operation -``` \ No newline at end of file +``` diff --git a/yellow-paper/docs/rollup-circuits/base_rollup.md b/yellow-paper/docs/rollup-circuits/base-rollup.md similarity index 96% rename from yellow-paper/docs/rollup-circuits/base_rollup.md rename to yellow-paper/docs/rollup-circuits/base-rollup.md index e489e2400ebf..c041e3bbfe6f 100644 --- a/yellow-paper/docs/rollup-circuits/base_rollup.md +++ b/yellow-paper/docs/rollup-circuits/base-rollup.md @@ -1,9 +1,8 @@ --- title: Base Rollup -sidebar_position: 2 --- -The base rollup circuit is the most complex of the rollup circuits, as it has to interpret the output data of a kernel proof and perform the state updates and transaction validation. While this makes the data structures complex to follow, the goal of the circuit is fairly straight forward: +The base rollup circuit is the most complex of the rollup circuits, as it has to interpret the output data of a kernel proof and perform the state updates and transaction validation. While this makes the data structures complex to follow, the goal of the circuit is fairly straight forward: Take `BaseRollupInputs` as an input value, and transform it to `BaseOrMergeRollupPublicInputs` as an output value while making sure that the validity conditions are met. 
@@ -86,7 +85,7 @@ class Body { } Body *-- "m" TxEffect -class ProvenBlock { +class ProvenBlock { archive: Snapshot header: Header body: Body @@ -162,7 +161,7 @@ class TxContext { } TxContext *-- ContractDeploymentData: contract_deployment_data -class CombinedConstantData { +class CombinedConstantData { historical_header: Header tx_context: TxContext } @@ -240,8 +239,8 @@ def BaseRollupCircuit( public_data_tree_root = partial.public_data_tree for i in len(kernel_data): tx_hash, _c, public_data_tree_root = kernel_checks( - kernel_data[i], - constants, + kernel_data[i], + constants, public_data_tree_root, historical_header_membership_witnesses[i], ) @@ -260,7 +259,7 @@ def BaseRollupCircuit( ) # We can use the sorted nullifiers to simplify batch-insertion - # The sorting can be checked with a permutation + # The sorting can be checked with a permutation nullifier_snapshot = successor_merkle_batch_insertion( partial.nullifier_tree.root, [...nullifiers for kernel_data.public_inputs.end.nullifiers in kernel_data], @@ -303,8 +302,8 @@ def BaseRollupCircuit( ) def kernel_checks( - kernel: KernelData, - constants: ConstantRollupData, + kernel: KernelData, + constants: ConstantRollupData, public_data_tree_root: Fr, historical_header_membership_witness: HeaderMembershipWitness ) -> (Fr[2], Fr[], Fr): @@ -319,9 +318,9 @@ def kernel_checks( assert len(kernel.public_inputs.end.public_call_stack) == 0 assert merkle_inclusion( - kernel.constants.historical_header.hash(), - kernel.constants.historical_header.global_variables.block_number, - historical_header_membership_witness, + kernel.constants.historical_header.hash(), + kernel.constants.historical_header.global_variables.block_number, + historical_header_membership_witness, constants.last_archive ) @@ -340,4 +339,4 @@ def kernel_checks( kernel.public_inputs.end.l2_to_l1_messages ) return (tx_hash, contracts, kernel.public_inputs.end.end_public_data_root) -``` \ No newline at end of file +``` diff --git 
a/yellow-paper/docs/rollup-circuits/index.md b/yellow-paper/docs/rollup-circuits/index.md index 6b58c6d14133..d1f4c842f03e 100644 --- a/yellow-paper/docs/rollup-circuits/index.md +++ b/yellow-paper/docs/rollup-circuits/index.md @@ -1,6 +1,5 @@ --- title: Rollup Circuits -sidebar_position: 99 --- ## Overview @@ -10,12 +9,13 @@ Together with the [validating light node](./../contracts/index.md) the rollup ci To support this, we construct a single proof for the entire block, which is then verified by the validating light node. This single proof is constructed by recursively merging proofs together in a binary tree structure. This structure allows us to keep the workload of each individual proof small, while making it very parallelizable. This works very well for the case where we want many actors to be able to participate in the proof generation. The tree structure is outlined below, but the general idea is that we have a tree where all the leaves are transactions (kernel proofs) and through $\log(n)$ steps we can then "compress" them down to just a single root proof. Note that we have three (3) different types of "merger" circuits, namely: + - The base rollup - Merges two kernel proofs - The merge rollup - Merges two base rollup proofs OR two merge rollup proofs - The root rollup - - Merges two merge rollup proofs + - Merges two merge rollup proofs In the diagram the size of the tree is limited for show, but a larger tree will have more layers of merge rollups proofs. Circles mark the different types of proofs, while squares mark the different circuit types. @@ -89,7 +89,7 @@ graph BT style K7 fill:#1976D2; ``` -To understand what the circuits are doing and what checks they need to apply it is useful to understand what data is going into the circuits and what data is coming out. +To understand what the circuits are doing and what checks they need to apply it is useful to understand what data is going into the circuits and what data is coming out. 
Below is a figure of the data structures thrown around for the block proof creation. Note that the diagram does not include much of the operations for kernels, but mainly the data structures that are used for the rollup circuits. @@ -162,7 +162,7 @@ class Body { } Body *-- "m" TxEffect -class ProvenBlock { +class ProvenBlock { archive: Snapshot header: Header body: Body @@ -238,7 +238,7 @@ class TxContext { } TxContext *-- ContractDeploymentData: contract_deployment_data -class CombinedConstantData { +class CombinedConstantData { historical_header: Header tx_context: TxContext } @@ -301,14 +301,14 @@ class ChildRollupData { } ChildRollupData *-- BaseOrMergeRollupPublicInputs: public_inputs -class MergeRollupInputs { +class MergeRollupInputs { left: ChildRollupData right: ChildRollupData } MergeRollupInputs *-- ChildRollupData: left MergeRollupInputs *-- ChildRollupData: right -class RootRollupInputs { +class RootRollupInputs { l1_to_l2_msgs_tree: Snapshot l1_to_l2_msgs: List~Fr~ l1_to_l2_msgs_sibling_path: List~Fr~ @@ -340,10 +340,12 @@ Reconsider `ContractDeploymentData` in light of the newer (still being finalised Since the diagram can be quite overwhelming, we will go through the different data structures and what they are used for along with the three (3) different rollup circuits. ### Higher-level tasks + Before looking at the circuits individually, it can however be a good idea to recall the reason we had them in the first place. For this, we are especially interested in the tasks that span multiple circuits and proofs. #### State consistency -While the individual kernels are validated on their own, they might rely on state changes earlier in the block. 
For the block to be correctly validated, this means that when validating kernel $n$, it must be executed on top of the state after all kernels $ B3 ``` - ```mermaid graph BT R[OutHash] @@ -448,7 +450,8 @@ graph BT K7 --> B3 ``` - The roots of these trees, together with incoming messages, makes up the `content_hash`. +The roots of these trees, together with incoming messages, makes up the `content_hash`. + ```mermaid graph BT R[content_hash] @@ -480,4 +483,4 @@ SHA256 is used since as the hash function since it will likely be reconstructed import DocCardList from '@theme/DocCardList'; - \ No newline at end of file + diff --git a/yellow-paper/docs/rollup-circuits/merge_rollup.md b/yellow-paper/docs/rollup-circuits/merge-rollup.md similarity index 97% rename from yellow-paper/docs/rollup-circuits/merge_rollup.md rename to yellow-paper/docs/rollup-circuits/merge-rollup.md index 84b59358d33e..0b6a98ee8481 100644 --- a/yellow-paper/docs/rollup-circuits/merge_rollup.md +++ b/yellow-paper/docs/rollup-circuits/merge-rollup.md @@ -1,6 +1,5 @@ --- title: Merge Rollup -sidebar_position: 3 --- The Merge rollup circuit is our in-between circuit, it doesn't need to perform any state updates, but mainly check the consistency of its inputs. 
@@ -61,7 +60,7 @@ class ChildRollupData { } ChildRollupData *-- BaseOrMergeRollupPublicInputs: public_inputs -class MergeRollupInputs { +class MergeRollupInputs { left: ChildRollupData right: ChildRollupData } @@ -73,7 +72,7 @@ MergeRollupInputs *-- ChildRollupData: right ```python def MergeRollupCircuit( - left: ChildRollupData, + left: ChildRollupData, right: ChildRollupData ) -> BaseOrMergeRollupPublicInputs: assert left.proof.is_valid(left.inputs) @@ -94,4 +93,4 @@ def MergeRollupCircuit( end=right.inputs.end, constants=left.inputs.constants ) -``` \ No newline at end of file +``` diff --git a/yellow-paper/docs/rollup-circuits/root_rollup.md b/yellow-paper/docs/rollup-circuits/root-rollup.md similarity index 92% rename from yellow-paper/docs/rollup-circuits/root_rollup.md rename to yellow-paper/docs/rollup-circuits/root-rollup.md index b3c2005acedd..8d396a22ecba 100644 --- a/yellow-paper/docs/rollup-circuits/root_rollup.md +++ b/yellow-paper/docs/rollup-circuits/root-rollup.md @@ -1,6 +1,5 @@ --- title: Root Rollup -sidebar_position: 4 --- The root rollup circuit is our top circuit, it applies the state changes passed through its children and the cross-chain messages. Essentially, it is the last step that allows us to prove that the state transition function $\mathcal{T}(S, B) \mapsto S'$ was applied correctly for a state $S$ and a block $B$. Note, that the root rollup circuit's public inputs do not comprise the block entirely as it would be too costly to verify. Given a `ProvenBlock` and proof a node can derive the public inputs and validate the correctness of the state progression. @@ -12,7 +11,6 @@ A[RootRollupInputs] --> C[RootRollupCircuit] --> B[RootRollupPublicInputs] --> D For rollup purposes, the node we want to convince of the correctness is the [validating light node](./../contracts/index.md) that we put on L1. We will cover it in more detail in the [cross-chain communication](./../contracts/index.md) section. 
- :::info Squishers This might practically happen through a series of "squisher" circuits that will wrap the proof in another proof that is cheaper to verify on-chain. For example, wrapping a ultra-plonk proof in a standard plonk proof. ::: @@ -88,7 +86,7 @@ class Body { } Body *-- "m" TxEffect -class ProvenBlock { +class ProvenBlock { archive: Snapshot header: Header body: Body @@ -125,7 +123,7 @@ class ChildRollupData { } ChildRollupData *-- BaseOrMergeRollupPublicInputs: public_inputs -class RootRollupInputs { +class RootRollupInputs { l1_to_l2_msgs_tree: Snapshot l1_to_l2_msgs: List~Fr~ l1_to_l2_msgs_sibling_path: List~Fr~ @@ -149,8 +147,8 @@ RootRollupPublicInputs *--Header : header ```python def RootRollupCircuit( - left: ChildRollupData, - right: ChildRollupData, + left: ChildRollupData, + right: ChildRollupData, l1_to_l2_msgs: List[Fr], l1_to_l2_msgs_sibling_path: List[Fr], parent: Header, @@ -166,18 +164,18 @@ def RootRollupCircuit( assert left.inputs.height_in_block_tree == right.inputs.height_in_block_tree assert merkle_inclusion( - parent.hash(), - parent_sibling_path, - left.inputs.constants.global_variables.block_number, + parent.hash(), + parent_sibling_path, + left.inputs.constants.global_variables.block_number, left.inputs.constants.last_archive.root ) l1_to_l2_msg_subtree = MerkleTree(l1_to_l2_msgs) l1_to_l2_msg_tree = merkle_insertion( - parent.state.l1_to_l2_message_tree, - l1_to_l2_msg_subtree.root, - l1_to_l2_msgs_sibling_path, - L1_TO_L2_SUBTREE_HEIGHT, + parent.state.l1_to_l2_message_tree, + l1_to_l2_msg_subtree.root, + l1_to_l2_msgs_sibling_path, + L1_TO_L2_SUBTREE_HEIGHT, L1_To_L2_HEIGHT ) @@ -196,9 +194,9 @@ def RootRollupCircuit( archive = merkle_insertion( header.last_archive - header.hash(), - archive_sibling_path, - 0, + header.hash(), + archive_sibling_path, + 0, ARCHIVE_HEIGHT ) @@ -209,4 +207,4 @@ def RootRollupCircuit( ) ``` -The `RootRollupPublicInputs` can then be used together with `Body` to build a `ProvenBlock` which can be 
used to convince the [validating light node](./../contracts/index.md) of state progression. \ No newline at end of file +The `RootRollupPublicInputs` can then be used together with `Body` to build a `ProvenBlock` which can be used to convince the [validating light node](./../contracts/index.md) of state progression. diff --git a/yellow-paper/docs/state/archive.md b/yellow-paper/docs/state/archive.md index ffb84328dc36..0e9cb3cf98b8 100644 --- a/yellow-paper/docs/state/archive.md +++ b/yellow-paper/docs/state/archive.md @@ -4,7 +4,7 @@ title: Archive # Archive -The Archive is an [append-only Merkle tree](./tree_impls.md#append-only-merkle-trees) that stores the headers of all previous blocks in the chain as its leaves. +The Archive is an [append-only Merkle tree](./tree_impls.md#append-only-merkle-trees) that stores the headers of all previous blocks in the chain as its leaves. For most chains this is not required since they are always executing at the head of the chain. However, private execution relies on proofs generated by the user, and since user's don't know the current head they must base their proofs on historical state. By including all prior headers (which include commitments to the state) the Archive allows us to easily prove that the historic state that a transaction was proven upon is valid. @@ -85,4 +85,4 @@ class Archive { leaves: List~Header~ } Archive *.. "m" Header : leaves -``` \ No newline at end of file +``` diff --git a/yellow-paper/docs/state/index.md b/yellow-paper/docs/state/index.md index 51bbde05b3b5..1816b5a629a5 100644 --- a/yellow-paper/docs/state/index.md +++ b/yellow-paper/docs/state/index.md @@ -1,40 +1,37 @@ --- title: State -sidebar_position: 10 --- # State -The global state is the set of data that makes up Aztec - it is persistent and only updates when new blocks are added to the chain. +The global state is the set of data that makes up Aztec - it is persistent and only updates when new blocks are added to the chain. 
-The state consists of multiple different categories of data with varying requirements. What all of the categories have in common is that they need strong integrity guarantees and efficient membership proofs. Like most other blockchains, this can be enforced by structuring the data as leafs in Merkle trees. +The state consists of multiple different categories of data with varying requirements. What all of the categories have in common is that they need strong integrity guarantees and efficient membership proofs. Like most other blockchains, this can be enforced by structuring the data as leafs in Merkle trees. However, unlike most other blockchains, our contract state cannot use a Merkle tree as a key-value store for each contracts data. The reason for this is that we have both private and public state; while public state could be stored in a key-value tree, private state cannot, as doing so would leak information whenever the private state is updated, even if encrypted. - To work around this, we use a two-tree approach for state that can be used privately. Namely we have one (or more) tree(s) where data is added to (sometimes called a data tree), and a second tree where we "nullify" or mark the data as deleted. This allows us to "update" a leaf by adding a new leaf to the date trees, and add the nullifier of the old leaf to the second tree (the nullifier tree). That way we can show that the new leaf is the "active" one, and that the old leaf is "deleted". -When dealing with private data, only the hash of the data is stored in the leaf in our data tree and we must setup a derivation mechanism that ensures nullifiers can be computed deterministically from the pre-image (the data that was hashed). This way, no-one can tell what data is stored in the leaf (unless they already know it), and therefore won't be able to derive the nullifier and tell if the leaf is active or deleted. 
+When dealing with private data, only the hash of the data is stored in the leaf in our data tree and we must setup a derivation mechanism that ensures nullifiers can be computed deterministically from the pre-image (the data that was hashed). This way, no-one can tell what data is stored in the leaf (unless they already know it), and therefore won't be able to derive the nullifier and tell if the leaf is active or deleted. Convincing someone that a piece of data is active can then be done by proving its membership in the data tree, and that it is not deleted by proving its non-membership in the nullifier tree. This ability to efficiently prove non-membership is one of the extra requirements we have for some parts of our state. To support the requirements most efficiently, we use two families of Merkle trees: + - The [Append-only Merkle tree](./tree_impls.md#append-only-merkle-trees), which supports efficient membership proofs, - The [Indexed Merkle tree](./tree_impls.md#indexed-merkle-trees), which supports efficient membership and non-membership proofs but increases the cost of adding leafs. ### Private State Access -Whenever a user is to read or use data, they must then convince the "rollup" that the their data is active. As mentioned above, they must prove that the data is in the data tree (membership proof) and that it is still active (non-membership proof). However, there are nuances to this approach! - -One important aspect to consider is *when* state can be accessed. In most blockchains, state is always accessed at the head of the chain and changes are only made by the sequencer as new blocks are added. +Whenever a user is to read or use data, they must then convince the "rollup" that the their data is active. As mentioned above, they must prove that the data is in the data tree (membership proof) and that it is still active (non-membership proof). However, there are nuances to this approach! 
-However, since private execution relies on proofs generated by the user, this would be very impractical - one users transaction could invalidate everyone elses. +One important aspect to consider is _when_ state can be accessed. In most blockchains, state is always accessed at the head of the chain and changes are only made by the sequencer as new blocks are added. +However, since private execution relies on proofs generated by the user, this would be very impractical - one users transaction could invalidate everyone elses. -While proving inclusion in the data tree can be done using historical state, the non-membership proof in the nullifier tree cannot. +While proving inclusion in the data tree can be done using historical state, the non-membership proof in the nullifier tree cannot. -Membership can be proven using historical state because we are using an append-only tree, so anything that was there in the past must still be in the append-only tree now. - -However, this doesn't work for the non-membership proof, as it can only prove that the data was active at the time the proof was generated, not that it is still active today! This would allow a user to create multiple transactions spending the same data and then send those transactions all at once, creating a double spend. +Membership can be proven using historical state because we are using an append-only tree, so anything that was there in the past must still be in the append-only tree now. +However, this doesn't work for the non-membership proof, as it can only prove that the data was active at the time the proof was generated, not that it is still active today! This would allow a user to create multiple transactions spending the same data and then send those transactions all at once, creating a double spend. To solve this, we need to perform the non-membership proofs at the head of the chain, which only the sequencer knows! 
This means that instead of the user proving that the nullifier of the data is not in the nullifier tree, they provide the nullifier as part of their transaction, and the sequencer then proves non-membership **AND** inserts it into the nullifier tree. This way, if multiple transactions include the same nullifier, only one of them will be included in the block as the others will fail the non-membership proof. @@ -47,6 +44,7 @@ A side-effect of this also means that if multiple users are "sharing" their note ## State Categories Below is a short description of the state catagories (trees) and why they have the type they have. + - [**Note Hashes**](./note_hash_tree.md): A set of hashes (commitments) of the individual blobs of contract data (we call these blobs of data notes). New notes can be created and their hashes inserted through contract execution. We need to support efficient membership proofs as any read will require one to prove validity. The set is represented as an [Append-only Merkle tree](./tree_impls.md#append-only-merkle-trees), storing the note hashes as leafs. - [**Nullifiers**](./nullifier_tree.md): A set of nullifiers for notes that have been spent. We need to support efficient non-membership proofs since we need to check that a note has not been spent before it can be used. The set is represented as an [Indexed Merkle tree](./tree_impls.md#indexed-merkle-trees). - [**Public Data**](./public_data_tree.md): The key-value store for public contract state. We need to support both efficient membership and non-membership proofs! We require both, since the tree is "empty" from the start. Meaning that if the key is not already stored (non-membership), we need to insert it, and if it is already stored (membership) we need to just update the value. @@ -148,7 +146,7 @@ class ContractTree { type: AppendOnlyMerkleTree leaves: List~NewContractData~ } -ContractTree *.. "m" NewContractData : leaves +ContractTree *.. 
"m" NewContractData : leaves class PublicDataPreimage { key: Fr @@ -161,7 +159,7 @@ class PublicDataTree { type: SuccessorMerkleTree leaves: List~PublicDataPreimage~ } -PublicDataTree *.. "m" PublicDataPreimage : leaves +PublicDataTree *.. "m" PublicDataPreimage : leaves class L1ToL2MessageTree { type: AppendOnlyMerkleTree @@ -180,7 +178,7 @@ class NullifierTree { } NullifierTree *.. "m" NullifierPreimage : leaves -class State { +class State { archive: Archive note_hash_tree: NoteHashTree nullifier_tree: NullifierTree @@ -196,13 +194,12 @@ State *-- PublicDataTree : public_data_tree State *-- ContractTree : contract_tree ``` - import DocCardList from '@theme/DocCardList'; --- -:::warning **Discussion Point**: +:::warning **Discussion Point**: "Indexed merkle tree" is not a very telling name, as our normal merkle trees are indexed too. I propose we call them "successor merkle trees" instead since each leaf refers to its successor. The low-nullifiers are also the predecessor of the nullifier you are inserting, so it seems nice that you prove that the nullifier you are inserting has a predecessor and that the predecessors successor would also be the successor of the nullifier you are inserting. 
-::: \ No newline at end of file +::: diff --git a/yellow-paper/docs/state/note_hash_tree.md b/yellow-paper/docs/state/note-hash-tree.md similarity index 100% rename from yellow-paper/docs/state/note_hash_tree.md rename to yellow-paper/docs/state/note-hash-tree.md diff --git a/yellow-paper/docs/state/nullifier_tree.md b/yellow-paper/docs/state/nullifier-tree.md similarity index 100% rename from yellow-paper/docs/state/nullifier_tree.md rename to yellow-paper/docs/state/nullifier-tree.md diff --git a/yellow-paper/docs/state/public_data_tree.md b/yellow-paper/docs/state/public-data-tree.md similarity index 100% rename from yellow-paper/docs/state/public_data_tree.md rename to yellow-paper/docs/state/public-data-tree.md diff --git a/yellow-paper/docs/state/tree_impls.md b/yellow-paper/docs/state/tree-implementations.md similarity index 99% rename from yellow-paper/docs/state/tree_impls.md rename to yellow-paper/docs/state/tree-implementations.md index ce0b7cd91acd..0a698eb2de4d 100644 --- a/yellow-paper/docs/state/tree_impls.md +++ b/yellow-paper/docs/state/tree-implementations.md @@ -1,7 +1,3 @@ ---- -sidebar_position: 1 ---- - # Tree implementations Aztec relies on two Merkle tree implementations in the protocol: append-only and indexed Merkle trees. diff --git a/yellow-paper/docs/transactions/index.md b/yellow-paper/docs/transactions/index.md index d9d7bbb3e46a..732cfea62c58 100644 --- a/yellow-paper/docs/transactions/index.md +++ b/yellow-paper/docs/transactions/index.md @@ -4,7 +4,7 @@ title: Transactions # Transactions -A transaction is the minimal action that changes the state of the network. Transactions in Aztec have a private and a public component, where the former is executed in the user's private execution environment (PXE) and the latter by the sequencer. +A transaction is the minimal action that changes the state of the network. 
Transactions in Aztec have a private and a public component, where the former is executed in the user's private execution environment (PXE) and the latter by the sequencer. A transaction is also split into three phases to [support authorization abstraction and fee payments](../gas-and-fees/gas-and-fees.md#fees): a validation and fee preparation phase, a main execution phase, and fee distribution phase. diff --git a/yellow-paper/docs/transactions/local-execution.md b/yellow-paper/docs/transactions/local-execution.md index 154655706d54..bca926136da8 100644 --- a/yellow-paper/docs/transactions/local-execution.md +++ b/yellow-paper/docs/transactions/local-execution.md @@ -6,6 +6,7 @@ Transactions are initiated via a _transaction execution request_ sent from the u A transaction execution request has the following structure. Note that, since Aztec uses full native account abstraction where every account is backed by a contract, a transaction execution request only needs to provide the contract address, function, and arguments of the initial call; nonces and signatures are arguments to the call, and thus opaque to the protocol. + | Field | Type | Description | |----------|----------|----------| | origin | AztecAddress | Address of the contract where the transaction is initiated. | @@ -25,7 +26,6 @@ In terms of circuitry, the simulation step must execute all application circuits ## Proving step -The proving step is similar to the simulation step, though witnesses are generated for all circuits and proven. Note that it is not necessary to execute the simulation step before the proving step, though it is desirable in order to provide the user with info on their transaction and catch any failed assertions early. +The proving step is similar to the simulation step, though witnesses are generated for all circuits and proven. 
Note that it is not necessary to execute the simulation step before the proving step, though it is desirable in order to provide the user with info on their transaction and catch any failed assertions early. The output of the proving step is a [_transaction_](./tx-object.md) object with a valid _proof_ associated, ready to be broadcasted to the network. - diff --git a/yellow-paper/docs/transactions/public-execution.md b/yellow-paper/docs/transactions/public-execution.md index e281acad277d..534f47f8b997 100644 --- a/yellow-paper/docs/transactions/public-execution.md +++ b/yellow-paper/docs/transactions/public-execution.md @@ -1,6 +1,6 @@ # Public execution -Transactions have a _public execution_ component. Once a transaction is picked up by a sequencer to be included in a block, the sequencer is responsible for executing all enqueued public function calls in the transaction. These are defined by the `data.accumulatedData.publicCallStack` field of the [transaction object](./tx-object.md), which are commitments to the preimages of the `enqueuedPublicFunctionCalls` in the transaction. The sequencer pops function calls from the stack, and pushes new ones as needed, until the public call stack is empty. +Transactions have a _public execution_ component. Once a transaction is picked up by a sequencer to be included in a block, the sequencer is responsible for executing all enqueued public function calls in the transaction. These are defined by the `data.accumulatedData.publicCallStack` field of the [transaction object](./tx-object.md), which are commitments to the preimages of the `enqueuedPublicFunctionCalls` in the transaction. The sequencer pops function calls from the stack, and pushes new ones as needed, until the public call stack is empty. 
## Bytecode @@ -13,4 +13,3 @@ Since public execution is run by the sequencer, it is run on the state of the ch ## Reverts Note that, unlike local private execution, public execution can _revert_ due to a failed assertion, running out of gas, trying to call a non-existing function, or other failures. If this happens, the sequencer halts execution and discards all side effects from the [transaction payload phase](../gas-and-fees/gas-and-fees.md#transaction-payload). The transaction is still included in the block and pays fees, but is flagged as reverted. - diff --git a/yellow-paper/docusaurus.config.js b/yellow-paper/docusaurus.config.js index e8ca3ea04887..522eac56374d 100644 --- a/yellow-paper/docusaurus.config.js +++ b/yellow-paper/docusaurus.config.js @@ -95,7 +95,7 @@ const config = { items: [ { type: "docSidebar", - sidebarId: "tutorialSidebar", + sidebarId: "yellowPaperSidebar", position: "left", label: "Protocol Description", }, diff --git a/yellow-paper/sidebars.js b/yellow-paper/sidebars.js index 9ab54c2459c4..1e97e37b58bd 100644 --- a/yellow-paper/sidebars.js +++ b/yellow-paper/sidebars.js @@ -14,20 +14,95 @@ /** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ const sidebars = { // By default, Docusaurus generates a sidebar from the docs folder structure - tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + // tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], // But you can create a sidebar manually - /* - tutorialSidebar: [ - 'intro', - 'hello', + + yellowPaperSidebar: [ + "intro", { - type: 'category', - label: 'Tutorial', - items: ['tutorial-basics/create-a-document'], + "Addresses & keys": [ + "addresses-and-keys/index", + "addresses-and-keys/specification", + "addresses-and-keys/precompiles", + "addresses-and-keys/diversified-and-stealth", + ], + State: [ + "state/index", + "state/tree-implementations", + "state/archive", + "state/note-hash-tree", + "state/nullifier-tree", + "state/public-data-tree", + ], + 
Transactions: [ + "transactions/index", + "transactions/local-execution", + "transactions/public-execution", + "transactions/tx-object", + "transactions/validity", + ], + Bytecode: ["bytecode/index"], + Calls: [ + "calls/index", + "calls/sync-calls", + "calls/enqueued-calls", + "calls/batched-calls", + "calls/static-calls", + "calls/delegate-calls", + "calls/unconstrained-calls", + "calls/public-private-messaging", + ], + "Cross-chain communication": [ + "cross-chain-communication/index", + "cross-chain-communication/da", + ], + Logs: ["logs/index"], + "Private Message Delivery": [ + "private-message-delivery/index", + "private-message-delivery/private-msg-delivery", // renamed to avoid routing problems + "private-message-delivery/note-discovery", + "private-message-delivery/encryption-and-decryption", + "private-message-delivery/registry", + "private-message-delivery/send-note-guidelines", + ], + "Gas & Fees": ["gas-and-fees/gas-and-fees"], + Decentralization: [ + "decentralization/governance", + "decentralization/block-production", + "decentralization/p2p-network", + ], + Cryptography: [ + "cryptography/performance-targets", + "cryptography/protocol-overview", + ], + // Protocol Statements? + "Kernel Circuits": [ + "circuits/private-function", + "circuits/private-kernel-initial", + "circuits/private-kernel-inner", + "circuits/private-kernel-reset", + "circuits/private-kernel-tail", + "circuits/public-kernel-iterative", + "circuits/public-kernel-tail", + ], + "Rollup Circuits": [ + "rollup-circuits/index", + "rollup-circuits/base-rollup", + "rollup-circuits/merge-rollup", + "rollup-circuits/root-rollup", + ], + "Public VM": [ + "public-vm/avm", + "public-vm/alu", + "public-vm/bytecode-validation-circuit", + "public-vm/control-flow", + "public-vm/InstructionSet", // TODO: change name's case + "public-vm/security", + "public-vm/state-model", + ], }, ], - */ }; module.exports = sidebars;