diff --git a/.circleci/config.yml b/.circleci/config.yml index 41989139df1..ad4b657f7e2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -955,9 +955,20 @@ jobs: name: "Benchmark" command: cond_run_script end-to-end ./scripts/run_tests_local benchmarks/bench_publish_rollup.test.ts environment: - { - DEBUG: "aztec:benchmarks:*,aztec:sequencer,aztec:world_state,aztec:merkle_trees", - } + DEBUG: "aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" + + bench-process-history: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: large + steps: + - *checkout + - *setup_env + - run: + name: "Benchmark" + command: cond_run_script end-to-end ./scripts/run_tests_local benchmarks/bench_process_history.test.ts + environment: + DEBUG: "aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" build-docs: machine: @@ -1331,6 +1342,7 @@ workflows: - guides-sample-dapp: *e2e_test - guides-up-quick-start: *e2e_test - bench-publish-rollup: *e2e_test + - bench-process-history: *e2e_test - e2e-end: requires: @@ -1368,12 +1380,13 @@ workflows: - guides-dapp-testing - guides-sample-dapp - guides-up-quick-start - - bench-publish-rollup <<: *defaults - bench-summary: requires: - e2e-end + - bench-publish-rollup + - bench-process-history <<: *defaults # Deploy under canary tag @@ -1392,6 +1405,7 @@ workflows: - deploy-dockerhub-canary <<: *deploy_defaults + # Run canary tests - canary-uniswap-test: requires: - build-deployment-canary @@ -1413,7 +1427,7 @@ workflows: - canary-browser-test - canary-cli-test - # Deployment and Canary tests + # Production deployment - deploy-dockerhub: requires: - canary-end diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b5d7e794dd6..397224dc4bd 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,5 +1,5 @@ { - ".": "0.8.8", - "barretenberg": "0.8.8", - "barretenberg/ts": "0.8.8" + ".": "0.8.9", + 
"barretenberg": "0.8.9", + "barretenberg/ts": "0.8.9" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6902b4f7202..746f51d5aa2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,22 @@ # Changelog +## [0.8.9](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.8...aztec-packages-v0.8.9) (2023-10-10) + + +### Features + +* Auto-recompile the boxes and fix broken frontend CompleteAddress import ([#2727](https://github.com/AztecProtocol/aztec-packages/issues/2727)) ([4ec4ea0](https://github.com/AztecProtocol/aztec-packages/commit/4ec4ea061e2d003da905d6c2026608b41cdca044)) + + +### Bug Fixes + +* Default export in noir-version ([#2757](https://github.com/AztecProtocol/aztec-packages/issues/2757)) ([6ff7bed](https://github.com/AztecProtocol/aztec-packages/commit/6ff7bed1722f8e7afa4b4c495216ca20ea47f42a)) + + +### Documentation + +* Add preview image ([#2759](https://github.com/AztecProtocol/aztec-packages/issues/2759)) ([45597af](https://github.com/AztecProtocol/aztec-packages/commit/45597af2a75ffeb8ecd91028f30f159910821673)) + ## [0.8.8](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.7...aztec-packages-v0.8.8) (2023-10-09) diff --git a/VERSION b/VERSION index b6ea8ae7434..c222e965f7b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v0.8.8 x-release-please-version +v0.8.9 x-release-please-version diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo index c574e1d887c..11d22b9c22e 100644 --- a/barretenberg/.gitrepo +++ b/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = 9de42a37415f8a3f1bd7de3ec460b1b299dd15fa - parent = 4b3f1aa8957af96389ed0e85aba6a99baa21e9ab + commit = 3fc459b3d2748f43a9b432528c7bdd84c8e515c9 + parent = c1cd7ea7e8faed4ce491cad52777afbbe7bae187 method = merge cmdver = 0.4.6 diff --git a/barretenberg/CHANGELOG.md b/barretenberg/CHANGELOG.md index 06625210056..74d07800581 100644 --- a/barretenberg/CHANGELOG.md +++ 
b/barretenberg/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.8.9](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.8...barretenberg-v0.8.9) (2023-10-10) + + +### Miscellaneous + +* **barretenberg:** Synchronize aztec-packages versions + ## [0.8.8](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.7...barretenberg-v0.8.8) (2023-10-09) diff --git a/barretenberg/VERSION b/barretenberg/VERSION index b6ea8ae7434..c222e965f7b 100644 --- a/barretenberg/VERSION +++ b/barretenberg/VERSION @@ -1 +1 @@ -v0.8.8 x-release-please-version +v0.8.9 x-release-please-version diff --git a/barretenberg/cpp/.clangd b/barretenberg/cpp/.clangd index 06f5d0d0590..e09234d9e7a 100644 --- a/barretenberg/cpp/.clangd +++ b/barretenberg/cpp/.clangd @@ -1,4 +1,4 @@ -CompileFlags: # Tweak the parse settings +CompileFlags: # Tweak the parse settings Remove: -fconstexpr-ops-limit=* --- # Applies all barretenberg source files @@ -42,7 +42,7 @@ Diagnostics: - misc-non-private-member-variables-in-classes - cppcoreguidelines-non-private-member-variables-in-classes # We have many `for` loops that violate this part of the bounds safety profile - - cppcoreguidelines-pro-bounds-constant-array-index + - cppcoreguidelines-pro-bounds-constant-array-index # Large diff; we often `use` an entire namespace. - google-build-using-namespace # Large diff @@ -59,6 +59,8 @@ Diagnostics: - readability-function-cognitive-complexity # It is often nicer to not be explicit - google-explicit-constructor + # Not honouring. 
+ - cppcoreguidelines-owning-memory --- # this divider is necessary # Disable some checks for Google Test/Bench @@ -69,5 +71,4 @@ Diagnostics: # these checks get triggered by the Google macros Remove: - cppcoreguidelines-avoid-non-const-global-variables - - cppcoreguidelines-owning-memory - - cppcoreguidelines-special-member-functions \ No newline at end of file + - cppcoreguidelines-special-member-functions diff --git a/barretenberg/cpp/CMakeLists.txt b/barretenberg/cpp/CMakeLists.txt index 41ee434b54b..61a34ed61d2 100644 --- a/barretenberg/cpp/CMakeLists.txt +++ b/barretenberg/cpp/CMakeLists.txt @@ -6,7 +6,7 @@ cmake_minimum_required(VERSION 3.24) project( Barretenberg DESCRIPTION "BN254 elliptic curve library, and PLONK SNARK prover" - VERSION 0.8.8 # x-release-please-version + VERSION 0.8.9 # x-release-please-version LANGUAGES CXX C ) # Insert version into `bb` config file diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json index 0b43346e94a..6bafa8fed7a 100644 --- a/barretenberg/cpp/CMakePresets.json +++ b/barretenberg/cpp/CMakePresets.json @@ -182,6 +182,35 @@ "cacheVariables": { "MULTITHREADING": "ON" } + }, + { + "name": "xray-1thread", + "displayName": "Build with single-threaded XRay Profiling", + "description": "Build with Clang and enable single-threaded LLVM XRay for profiling", + "generator": "Unix Makefiles", + "inherits": "clang16", + "environment": { + "CFLAGS": "-fxray-instrument -fxray-instruction-threshold=10", + "CXXFLAGS": "-fxray-instrument -fxray-instruction-threshold=10", + "LDFLAGS": "-fxray-instrument -fxray-instruction-threshold=10" + }, + "cacheVariables": { + "MULTITHREADING": "OFF" + }, + "binaryDir": "build-xray-1thread" + }, + { + "name": "xray", + "displayName": "Build with multi-threaded XRay Profiling", + "description": "Build with Clang and enable multi-threaded LLVM XRay for profiling", + "generator": "Unix Makefiles", + "inherits": "clang16", + "environment": { + "CFLAGS": "-fxray-instrument 
-fxray-instruction-threshold=10", + "CXXFLAGS": "-fxray-instrument -fxray-instruction-threshold=10", + "LDFLAGS": "-fxray-instrument -fxray-instruction-threshold=10" + }, + "binaryDir": "build-xray" } ], "buildPresets": [ @@ -273,6 +302,16 @@ "inheritConfigureEnvironment": true, "jobs": 0, "targets": ["barretenberg.wasm"] + }, + { + "name": "xray-1thread", + "configurePreset": "xray-1thread", + "inherits": "default" + }, + { + "name": "xray", + "configurePreset": "xray", + "inherits": "default" } ], "testPresets": [ diff --git a/barretenberg/cpp/scripts/collect_profile_information.sh b/barretenberg/cpp/scripts/collect_profile_information.sh new file mode 100755 index 00000000000..62757181ac3 --- /dev/null +++ b/barretenberg/cpp/scripts/collect_profile_information.sh @@ -0,0 +1,45 @@ +#!/bin/bash +set -eu + +PRESET=${1:-xray-1thread} # can also be 'xray' +ONLY_PROCESS=${2:-} + +# Move above script dir. +cd $(dirname $0)/.. + +# Configure and build with xray preset. +cmake --preset $PRESET +cmake --build --preset $PRESET + +cd build-$PRESET + +if [ -z "$ONLY_PROCESS" ]; then + # Clear old profile data. + rm -f xray-log.honk_bench_main_simple.* + + # Run benchmark with profiling. + XRAY_OPTIONS="patch_premain=true xray_mode=xray-basic verbosity=1" ./bin/honk_bench_main_simple +fi + +function shorten_cpp_names() { + NO_TEMP='s/<[^<>;]+>//g;' + sed -E '# Multiple rounds of template removal (crude but simple). + '"$NO_TEMP $NO_TEMP $NO_TEMP $NO_TEMP $NO_TEMP $NO_TEMP"' + # Remove problematic trailing const. + s/ const;/;/g; + # Parameter removal. + s/\([^();]*\)/()/g; + # Return value removal. + s/;[^; ]+ /;/g; + # Remove namespaces. + s/[a-zA-Z_][a-zA-Z0-9_]*:://g; + ' +} + +# Process benchmark file. 
+llvm-xray-16 stack xray-log.honk_bench_main_simple.* \ + --instr_map=./bin/honk_bench_main_simple --stack-format=flame --aggregate-threads --aggregation-type=time --all-stacks \ + | node ../scripts/llvm_xray_stack_flame_corrector.js \ + | shorten_cpp_names \ + | ../scripts/flamegraph.pl > xray.svg +echo "Profiling complete, now you can do e.g. 'scp mainframe:`readlink -f xray.svg` .' on a local terminal and open the SVG in a browser." diff --git a/barretenberg/cpp/scripts/flamegraph.pl b/barretenberg/cpp/scripts/flamegraph.pl new file mode 100755 index 00000000000..d2172b61664 --- /dev/null +++ b/barretenberg/cpp/scripts/flamegraph.pl @@ -0,0 +1,1252 @@ +#!/usr/bin/perl -w +# +# flamegraph.pl flame stack grapher. +# +# This takes stack samples and renders a call graph, allowing hot functions +# and codepaths to be quickly identified. Stack samples can be generated using +# tools such as DTrace, perf, SystemTap, and Instruments. +# +# USAGE: ./flamegraph.pl [options] input.txt > graph.svg +# +# grep funcA input.txt | ./flamegraph.pl [options] > graph.svg +# +# Then open the resulting .svg in a web browser, for interactivity: mouse-over +# frames for info, click to zoom, and ctrl-F to search. +# +# Options are listed in the usage message (--help). +# +# The input is stack frames and sample counts formatted as single lines. Each +# frame in the stack is semicolon separated, with a space and count at the end +# of the line. These can be generated for Linux perf script output using +# stackcollapse-perf.pl, for DTrace using stackcollapse.pl, and for other tools +# using the other stackcollapse programs. Example input: +# +# swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 1 +# +# An optional extra column of counts can be provided to generate a differential +# flame graph of the counts, colored red for more, and blue for less. This +# can be useful when using flame graphs for non-regression testing. 
+# See the header comment in the difffolded.pl program for instructions. +# +# The input functions can optionally have annotations at the end of each +# function name, following a precedent by some tools (Linux perf's _[k]): +# _[k] for kernel +# _[i] for inlined +# _[j] for jit +# _[w] for waker +# Some of the stackcollapse programs support adding these annotations, eg, +# stackcollapse-perf.pl --kernel --jit. They are used merely for colors by +# some palettes, eg, flamegraph.pl --color=java. +# +# The output flame graph shows relative presence of functions in stack samples. +# The ordering on the x-axis has no meaning; since the data is samples, time +# order of events is not known. The order used sorts function names +# alphabetically. +# +# While intended to process stack samples, this can also process stack traces. +# For example, tracing stacks for memory allocation, or resource usage. You +# can use --title to set the title to reflect the content, and --countname +# to change "samples" to "bytes" etc. +# +# There are a few different palettes, selectable using --color. By default, +# the colors are selected at random (except for differentials). Functions +# called "-" will be printed gray, which can be used for stack separators (eg, +# between user and kernel stacks). +# +# HISTORY +# +# This was inspired by Neelakanth Nadgir's excellent function_call_graph.rb +# program, which visualized function entry and return trace events. As Neel +# wrote: "The output displayed is inspired by Roch's CallStackAnalyzer which +# was in turn inspired by the work on vftrace by Jan Boerhout". See: +# https://blogs.oracle.com/realneel/entry/visualizing_callstacks_via_dtrace_and +# +# Copyright 2016 Netflix, Inc. +# Copyright 2011 Joyent, Inc. All rights reserved. +# Copyright 2011 Brendan Gregg. All rights reserved. +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). 
+# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at docs/cddl1.txt or +# http://opensource.org/licenses/CDDL-1.0. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at docs/cddl1.txt. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +# 11-Oct-2014 Adrien Mahieux Added zoom. +# 21-Nov-2013 Shawn Sterling Added consistent palette file option +# 17-Mar-2013 Tim Bunce Added options and more tunables. +# 15-Dec-2011 Dave Pacheco Support for frames with whitespace. +# 10-Sep-2011 Brendan Gregg Created this. + +use strict; + +use Getopt::Long; + +use open qw(:std :utf8); + +# tunables +my $encoding; +my $fonttype = "Verdana"; +my $imagewidth = 1200; # max width, pixels +my $frameheight = 16; # max height is dynamic +my $fontsize = 12; # base text size +my $fontwidth = 0.59; # avg width relative to fontsize +my $minwidth = 0.1; # min function width, pixels +my $nametype = "Function:"; # what are the names in the data? +my $countname = "samples"; # what are the counts in the data? 
+my $colors = "hot"; # color theme +my $bgcolors = ""; # background color theme +my $nameattrfile; # file holding function attributes +my $timemax; # (override the) sum of the counts +my $factor = 1; # factor to scale counts by +my $hash = 0; # color by function name +my $palette = 0; # if we use consistent palettes (default off) +my %palette_map; # palette map hash +my $pal_file = "palette.map"; # palette map file name +my $stackreverse = 0; # reverse stack order, switching merge end +my $inverted = 0; # icicle graph +my $flamechart = 0; # produce a flame chart (sort by time, do not merge stacks) +my $negate = 0; # switch differential hues +my $titletext = ""; # centered heading +my $titledefault = "Flame Graph"; # overwritten by --title +my $titleinverted = "Icicle Graph"; # " " +my $searchcolor = "rgb(230,0,230)"; # color for search highlighting +my $notestext = ""; # embedded notes in SVG +my $subtitletext = ""; # second level title (optional) +my $help = 0; + +sub usage { + die < outfile.svg\n + --title TEXT # change title text + --subtitle TEXT # second level title (optional) + --width NUM # width of image (default 1200) + --height NUM # height of each frame (default 16) + --minwidth NUM # omit smaller functions (default 0.1 pixels) + --fonttype FONT # font type (default "Verdana") + --fontsize NUM # font size (default 12) + --countname TEXT # count type label (default "samples") + --nametype TEXT # name type label (default "Function:") + --colors PALETTE # set color palette. choices are: hot (default), mem, + # io, wakeup, chain, java, js, perl, red, green, blue, + # aqua, yellow, purple, orange + --bgcolors COLOR # set background colors. 
gradient choices are yellow + # (default), blue, green, grey; flat colors use "#rrggbb" + --hash # colors are keyed by function name hash + --cp # use consistent palette (palette.map) + --reverse # generate stack-reversed flame graph + --inverted # icicle graph + --flamechart # produce a flame chart (sort by time, do not merge stacks) + --negate # switch differential hues (blue<->red) + --notes TEXT # add notes comment in SVG (for debugging) + --help # this message + + eg, + $0 --title="Flame Graph: malloc()" trace.txt > graph.svg +USAGE_END +} + +GetOptions( + 'fonttype=s' => \$fonttype, + 'width=i' => \$imagewidth, + 'height=i' => \$frameheight, + 'encoding=s' => \$encoding, + 'fontsize=f' => \$fontsize, + 'fontwidth=f' => \$fontwidth, + 'minwidth=f' => \$minwidth, + 'title=s' => \$titletext, + 'subtitle=s' => \$subtitletext, + 'nametype=s' => \$nametype, + 'countname=s' => \$countname, + 'nameattr=s' => \$nameattrfile, + 'total=s' => \$timemax, + 'factor=f' => \$factor, + 'colors=s' => \$colors, + 'bgcolors=s' => \$bgcolors, + 'hash' => \$hash, + 'cp' => \$palette, + 'reverse' => \$stackreverse, + 'inverted' => \$inverted, + 'flamechart' => \$flamechart, + 'negate' => \$negate, + 'notes=s' => \$notestext, + 'help' => \$help, +) or usage(); +$help && usage(); + +# internals +my $ypad1 = $fontsize * 3; # pad top, include title +my $ypad2 = $fontsize * 2 + 10; # pad bottom, include labels +my $ypad3 = $fontsize * 2; # pad top, include subtitle (optional) +my $xpad = 10; # pad lefm and right +my $framepad = 1; # vertical padding for frames +my $depthmax = 0; +my %Events; +my %nameattr; + +if ($flamechart && $titletext eq "") { + $titletext = "Flame Chart"; +} + +if ($titletext eq "") { + unless ($inverted) { + $titletext = $titledefault; + } else { + $titletext = $titleinverted; + } +} + +if ($nameattrfile) { + # The name-attribute file format is a function name followed by a tab then + # a sequence of tab separated name=value pairs. 
+ open my $attrfh, $nameattrfile or die "Can't read $nameattrfile: $!\n"; + while (<$attrfh>) { + chomp; + my ($funcname, $attrstr) = split /\t/, $_, 2; + die "Invalid format in $nameattrfile" unless defined $attrstr; + $nameattr{$funcname} = { map { split /=/, $_, 2 } split /\t/, $attrstr }; + } +} + +if ($notestext =~ /[<>]/) { + die "Notes string can't contain < or >" +} + +# background colors: +# - yellow gradient: default (hot, java, js, perl) +# - green gradient: mem +# - blue gradient: io, wakeup, chain +# - gray gradient: flat colors (red, green, blue, ...) +if ($bgcolors eq "") { + # choose a default + if ($colors eq "mem") { + $bgcolors = "green"; + } elsif ($colors =~ /^(io|wakeup|chain)$/) { + $bgcolors = "blue"; + } elsif ($colors =~ /^(red|green|blue|aqua|yellow|purple|orange)$/) { + $bgcolors = "grey"; + } else { + $bgcolors = "yellow"; + } +} +my ($bgcolor1, $bgcolor2); +if ($bgcolors eq "yellow") { + $bgcolor1 = "#eeeeee"; # background color gradient start + $bgcolor2 = "#eeeeb0"; # background color gradient stop +} elsif ($bgcolors eq "blue") { + $bgcolor1 = "#eeeeee"; $bgcolor2 = "#e0e0ff"; +} elsif ($bgcolors eq "green") { + $bgcolor1 = "#eef2ee"; $bgcolor2 = "#e0ffe0"; +} elsif ($bgcolors eq "grey") { + $bgcolor1 = "#f8f8f8"; $bgcolor2 = "#e8e8e8"; +} elsif ($bgcolors =~ /^#......$/) { + $bgcolor1 = $bgcolor2 = $bgcolors; +} else { + die "Unrecognized bgcolor option \"$bgcolors\"" +} + +# SVG functions +{ package SVG; + sub new { + my $class = shift; + my $self = {}; + bless ($self, $class); + return $self; + } + + sub header { + my ($self, $w, $h) = @_; + my $enc_attr = ''; + if (defined $encoding) { + $enc_attr = qq{ encoding="$encoding"}; + } + $self->{svg} .= < + + + + +SVG + } + + sub include { + my ($self, $content) = @_; + $self->{svg} .= $content; + } + + sub colorAllocate { + my ($self, $r, $g, $b) = @_; + return "rgb($r,$g,$b)"; + } + + sub group_start { + my ($self, $attr) = @_; + + my @g_attr = map { + exists $attr->{$_} ? 
sprintf(qq/$_="%s"/, $attr->{$_}) : () + } qw(id class); + push @g_attr, $attr->{g_extra} if $attr->{g_extra}; + if ($attr->{href}) { + my @a_attr; + push @a_attr, sprintf qq/xlink:href="%s"/, $attr->{href} if $attr->{href}; + # default target=_top else links will open within SVG + push @a_attr, sprintf qq/target="%s"/, $attr->{target} || "_top"; + push @a_attr, $attr->{a_extra} if $attr->{a_extra}; + $self->{svg} .= sprintf qq/\n/, join(' ', (@a_attr, @g_attr)); + } else { + $self->{svg} .= sprintf qq/\n/, join(' ', @g_attr); + } + + $self->{svg} .= sprintf qq/%s<\/title>/, $attr->{title} + if $attr->{title}; # should be first element within g container + } + + sub group_end { + my ($self, $attr) = @_; + $self->{svg} .= $attr->{href} ? qq/<\/a>\n/ : qq/<\/g>\n/; + } + + sub filledRectangle { + my ($self, $x1, $y1, $x2, $y2, $fill, $extra) = @_; + $x1 = sprintf "%0.1f", $x1; + $x2 = sprintf "%0.1f", $x2; + my $w = sprintf "%0.1f", $x2 - $x1; + my $h = sprintf "%0.1f", $y2 - $y1; + $extra = defined $extra ? $extra : ""; + $self->{svg} .= qq/\n/; + } + + sub stringTTF { + my ($self, $id, $x, $y, $str, $extra) = @_; + $x = sprintf "%0.2f", $x; + $id = defined $id ? qq/id="$id"/ : ""; + $extra ||= ""; + $self->{svg} .= qq/$str<\/text>\n/; + } + + sub svg { + my $self = shift; + return "$self->{svg}\n"; + } + 1; +} + +sub namehash { + # Generate a vector hash for the name string, weighting early over + # later characters. We want to pick the same colors for function + # names across different flame graphs. 
+ my $name = shift; + my $vector = 0; + my $weight = 1; + my $max = 1; + my $mod = 10; + # if module name present, trunc to 1st char + $name =~ s/.(.*?)`//; + foreach my $c (split //, $name) { + my $i = (ord $c) % $mod; + $vector += ($i / ($mod++ - 1)) * $weight; + $max += 1 * $weight; + $weight *= 0.70; + last if $mod > 12; + } + return (1 - $vector / $max) +} + +sub color { + my ($type, $hash, $name) = @_; + my ($v1, $v2, $v3); + + if ($hash) { + $v1 = namehash($name); + $v2 = $v3 = namehash(scalar reverse $name); + } else { + $v1 = rand(1); + $v2 = rand(1); + $v3 = rand(1); + } + + # theme palettes + if (defined $type and $type eq "hot") { + my $r = 205 + int(50 * $v3); + my $g = 0 + int(230 * $v1); + my $b = 0 + int(55 * $v2); + return "rgb($r,$g,$b)"; + } + if (defined $type and $type eq "mem") { + my $r = 0; + my $g = 190 + int(50 * $v2); + my $b = 0 + int(210 * $v1); + return "rgb($r,$g,$b)"; + } + if (defined $type and $type eq "io") { + my $r = 80 + int(60 * $v1); + my $g = $r; + my $b = 190 + int(55 * $v2); + return "rgb($r,$g,$b)"; + } + + # multi palettes + if (defined $type and $type eq "java") { + # Handle both annotations (_[j], _[i], ...; which are + # accurate), as well as input that lacks any annotations, as + # best as possible. Without annotations, we get a little hacky + # and match on java|org|com, etc. 
+ if ($name =~ m:_\[j\]$:) { # jit annotation + $type = "green"; + } elsif ($name =~ m:_\[i\]$:) { # inline annotation + $type = "aqua"; + } elsif ($name =~ m:^L?(java|javax|jdk|net|org|com|io|sun)/:) { # Java + $type = "green"; + } elsif ($name =~ /:::/) { # Java, typical perf-map-agent method separator + $type = "green"; + } elsif ($name =~ /::/) { # C++ + $type = "yellow"; + } elsif ($name =~ m:_\[k\]$:) { # kernel annotation + $type = "orange"; + } elsif ($name =~ /::/) { # C++ + $type = "yellow"; + } else { # system + $type = "red"; + } + # fall-through to color palettes + } + if (defined $type and $type eq "perl") { + if ($name =~ /::/) { # C++ + $type = "yellow"; + } elsif ($name =~ m:Perl: or $name =~ m:\.pl:) { # Perl + $type = "green"; + } elsif ($name =~ m:_\[k\]$:) { # kernel + $type = "orange"; + } else { # system + $type = "red"; + } + # fall-through to color palettes + } + if (defined $type and $type eq "js") { + # Handle both annotations (_[j], _[i], ...; which are + # accurate), as well as input that lacks any annotations, as + # best as possible. Without annotations, we get a little hacky, + # and match on a "/" with a ".js", etc. 
+ if ($name =~ m:_\[j\]$:) { # jit annotation + if ($name =~ m:/:) { + $type = "green"; # source + } else { + $type = "aqua"; # builtin + } + } elsif ($name =~ /::/) { # C++ + $type = "yellow"; + } elsif ($name =~ m:/.*\.js:) { # JavaScript (match "/" in path) + $type = "green"; + } elsif ($name =~ m/:/) { # JavaScript (match ":" in builtin) + $type = "aqua"; + } elsif ($name =~ m/^ $/) { # Missing symbol + $type = "green"; + } elsif ($name =~ m:_\[k\]:) { # kernel + $type = "orange"; + } else { # system + $type = "red"; + } + # fall-through to color palettes + } + if (defined $type and $type eq "wakeup") { + $type = "aqua"; + # fall-through to color palettes + } + if (defined $type and $type eq "chain") { + if ($name =~ m:_\[w\]:) { # waker + $type = "aqua" + } else { # off-CPU + $type = "blue"; + } + # fall-through to color palettes + } + + # color palettes + if (defined $type and $type eq "red") { + my $r = 200 + int(55 * $v1); + my $x = 50 + int(80 * $v1); + return "rgb($r,$x,$x)"; + } + if (defined $type and $type eq "green") { + my $g = 200 + int(55 * $v1); + my $x = 50 + int(60 * $v1); + return "rgb($x,$g,$x)"; + } + if (defined $type and $type eq "blue") { + my $b = 205 + int(50 * $v1); + my $x = 80 + int(60 * $v1); + return "rgb($x,$x,$b)"; + } + if (defined $type and $type eq "yellow") { + my $x = 175 + int(55 * $v1); + my $b = 50 + int(20 * $v1); + return "rgb($x,$x,$b)"; + } + if (defined $type and $type eq "purple") { + my $x = 190 + int(65 * $v1); + my $g = 80 + int(60 * $v1); + return "rgb($x,$g,$x)"; + } + if (defined $type and $type eq "aqua") { + my $r = 50 + int(60 * $v1); + my $g = 165 + int(55 * $v1); + my $b = 165 + int(55 * $v1); + return "rgb($r,$g,$b)"; + } + if (defined $type and $type eq "orange") { + my $r = 190 + int(65 * $v1); + my $g = 90 + int(65 * $v1); + return "rgb($r,$g,0)"; + } + + return "rgb(0,0,0)"; +} + +sub color_scale { + my ($value, $max) = @_; + my ($r, $g, $b) = (255, 255, 255); + $value = -$value if $negate; + if 
($value > 0) { + $g = $b = int(210 * ($max - $value) / $max); + } elsif ($value < 0) { + $r = $g = int(210 * ($max + $value) / $max); + } + return "rgb($r,$g,$b)"; +} + +sub color_map { + my ($colors, $func) = @_; + if (exists $palette_map{$func}) { + return $palette_map{$func}; + } else { + $palette_map{$func} = color($colors, $hash, $func); + return $palette_map{$func}; + } +} + +sub write_palette { + open(FILE, ">$pal_file"); + foreach my $key (sort keys %palette_map) { + print FILE $key."->".$palette_map{$key}."\n"; + } + close(FILE); +} + +sub read_palette { + if (-e $pal_file) { + open(FILE, $pal_file) or die "can't open file $pal_file: $!"; + while ( my $line = ) { + chomp($line); + (my $key, my $value) = split("->",$line); + $palette_map{$key}=$value; + } + close(FILE) + } +} + +my %Node; # Hash of merged frame data +my %Tmp; + +# flow() merges two stacks, storing the merged frames and value data in %Node. +sub flow { + my ($last, $this, $v, $d) = @_; + + my $len_a = @$last - 1; + my $len_b = @$this - 1; + + my $i = 0; + my $len_same; + for (; $i <= $len_a; $i++) { + last if $i > $len_b; + last if $last->[$i] ne $this->[$i]; + } + $len_same = $i; + + for ($i = $len_a; $i >= $len_same; $i--) { + my $k = "$last->[$i];$i"; + # a unique ID is constructed from "func;depth;etime"; + # func-depth isn't unique, it may be repeated later. + $Node{"$k;$v"}->{stime} = delete $Tmp{$k}->{stime}; + if (defined $Tmp{$k}->{delta}) { + $Node{"$k;$v"}->{delta} = delete $Tmp{$k}->{delta}; + } + delete $Tmp{$k}; + } + + for ($i = $len_same; $i <= $len_b; $i++) { + my $k = "$this->[$i];$i"; + $Tmp{$k}->{stime} = $v; + if (defined $d) { + $Tmp{$k}->{delta} += $i == $len_b ? 
$d : 0; + } + } + + return $this; +} + +# parse input +my @Data; +my @SortedData; +my $last = []; +my $time = 0; +my $delta = undef; +my $ignored = 0; +my $line; +my $maxdelta = 1; + +# reverse if needed +foreach (<>) { + chomp; + $line = $_; + if ($stackreverse) { + # there may be an extra samples column for differentials + # XXX todo: redo these REs as one. It's repeated below. + my($stack, $samples) = (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + my $samples2 = undef; + if ($stack =~ /^(.*)\s+?(\d+(?:\.\d*)?)$/) { + $samples2 = $samples; + ($stack, $samples) = $stack =~ (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + unshift @Data, join(";", reverse split(";", $stack)) . " $samples $samples2"; + } else { + unshift @Data, join(";", reverse split(";", $stack)) . " $samples"; + } + } else { + unshift @Data, $line; + } +} + +if ($flamechart) { + # In flame chart mode, just reverse the data so time moves from left to right. + @SortedData = reverse @Data; +} else { + @SortedData = sort @Data; +} + +# process and merge frames +foreach (@SortedData) { + chomp; + # process: folded_stack count + # eg: func_a;func_b;func_c 31 + my ($stack, $samples) = (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + unless (defined $samples and defined $stack) { + ++$ignored; + next; + } + + # there may be an extra samples column for differentials: + my $samples2 = undef; + if ($stack =~ /^(.*)\s+?(\d+(?:\.\d*)?)$/) { + $samples2 = $samples; + ($stack, $samples) = $stack =~ (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + } + $delta = undef; + if (defined $samples2) { + $delta = $samples2 - $samples; + $maxdelta = abs($delta) if abs($delta) > $maxdelta; + } + + # for chain graphs, annotate waker frames with "_[w]", for later + # coloring. This is a hack, but has a precedent ("_[k]" from perf). 
+ if ($colors eq "chain") { + my @parts = split ";--;", $stack; + my @newparts = (); + $stack = shift @parts; + $stack .= ";--;"; + foreach my $part (@parts) { + $part =~ s/;/_[w];/g; + $part .= "_[w]"; + push @newparts, $part; + } + $stack .= join ";--;", @parts; + } + + # merge frames and populate %Node: + $last = flow($last, [ '', split ";", $stack ], $time, $delta); + + if (defined $samples2) { + $time += $samples2; + } else { + $time += $samples; + } +} +flow($last, [], $time, $delta); + +warn "Ignored $ignored lines with invalid format\n" if $ignored; +unless ($time) { + warn "ERROR: No stack counts found\n"; + my $im = SVG->new(); + # emit an error message SVG, for tools automating flamegraph use + my $imageheight = $fontsize * 5; + $im->header($imagewidth, $imageheight); + $im->stringTTF(undef, int($imagewidth / 2), $fontsize * 2, + "ERROR: No valid input provided to flamegraph.pl."); + print $im->svg; + exit 2; +} +if ($timemax and $timemax < $time) { + warn "Specified --total $timemax is less than actual total $time, so ignored\n" + if $timemax/$time > 0.02; # only warn is significant (e.g., not rounding etc) + undef $timemax; +} +$timemax ||= $time; + +my $widthpertime = ($imagewidth - 2 * $xpad) / $timemax; +my $minwidth_time = $minwidth / $widthpertime; + +# prune blocks that are too narrow and determine max depth +while (my ($id, $node) = each %Node) { + my ($func, $depth, $etime) = split ";", $id; + my $stime = $node->{stime}; + die "missing start for $id" if not defined $stime; + + if (($etime-$stime) < $minwidth_time) { + delete $Node{$id}; + next; + } + $depthmax = $depth if $depth > $depthmax; +} + +# draw canvas, and embed interactive JavaScript program +my $imageheight = (($depthmax + 1) * $frameheight) + $ypad1 + $ypad2; +$imageheight += $ypad3 if $subtitletext ne ""; +my $titlesize = $fontsize + 5; +my $im = SVG->new(); +my ($black, $vdgrey, $dgrey) = ( + $im->colorAllocate(0, 0, 0), + $im->colorAllocate(160, 160, 160), + 
$im->colorAllocate(200, 200, 200), + ); +$im->header($imagewidth, $imageheight); +my $inc = < + + + + + + + +INC +$im->include($inc); +$im->filledRectangle(0, 0, $imagewidth, $imageheight, 'url(#background)'); +$im->stringTTF("title", int($imagewidth / 2), $fontsize * 2, $titletext); +$im->stringTTF("subtitle", int($imagewidth / 2), $fontsize * 4, $subtitletext) if $subtitletext ne ""; +$im->stringTTF("details", $xpad, $imageheight - ($ypad2 / 2), " "); +$im->stringTTF("unzoom", $xpad, $fontsize * 2, "Reset Zoom", 'class="hide"'); +$im->stringTTF("search", $imagewidth - $xpad - 100, $fontsize * 2, "Search"); +$im->stringTTF("ignorecase", $imagewidth - $xpad - 16, $fontsize * 2, "ic"); +$im->stringTTF("matched", $imagewidth - $xpad - 100, $imageheight - ($ypad2 / 2), " "); + +if ($palette) { + read_palette(); +} + +# draw frames +$im->group_start({id => "frames"}); +while (my ($id, $node) = each %Node) { + my ($func, $depth, $etime) = split ";", $id; + my $stime = $node->{stime}; + my $delta = $node->{delta}; + + $etime = $timemax if $func eq "" and $depth == 0; + + my $x1 = $xpad + $stime * $widthpertime; + my $x2 = $xpad + $etime * $widthpertime; + my ($y1, $y2); + unless ($inverted) { + $y1 = $imageheight - $ypad2 - ($depth + 1) * $frameheight + $framepad; + $y2 = $imageheight - $ypad2 - $depth * $frameheight; + } else { + $y1 = $ypad1 + $depth * $frameheight; + $y2 = $ypad1 + ($depth + 1) * $frameheight - $framepad; + } + + my $samples = sprintf "%.0f", ($etime - $stime) * $factor; + (my $samples_txt = $samples) # add commas per perlfaq5 + =~ s/(^[-+]?\d+?(?=(?>(?:\d{3})+)(?!\d))|\G\d{3}(?=\d))/$1,/g; + + my $info; + if ($func eq "" and $depth == 0) { + $info = "all ($samples_txt $countname, 100%)"; + } else { + my $pct = sprintf "%.2f", ((100 * $samples) / ($timemax * $factor)); + my $escaped_func = $func; + # clean up SVG breaking characters: + $escaped_func =~ s/&/&/g; + $escaped_func =~ s//>/g; + $escaped_func =~ s/"/"/g; + $escaped_func =~ s/_\[[kwij]\]$//; 
# strip any annotation + unless (defined $delta) { + $info = "$escaped_func ($samples_txt $countname, $pct%)"; + } else { + my $d = $negate ? -$delta : $delta; + my $deltapct = sprintf "%.2f", ((100 * $d) / ($timemax * $factor)); + $deltapct = $d > 0 ? "+$deltapct" : $deltapct; + $info = "$escaped_func ($samples_txt $countname, $pct%; $deltapct%)"; + } + } + + my $nameattr = { %{ $nameattr{$func}||{} } }; # shallow clone + $nameattr->{title} ||= $info; + $im->group_start($nameattr); + + my $color; + if ($func eq "--") { + $color = $vdgrey; + } elsif ($func eq "-") { + $color = $dgrey; + } elsif (defined $delta) { + $color = color_scale($delta, $maxdelta); + } elsif ($palette) { + $color = color_map($colors, $func); + } else { + $color = color($colors, $hash, $func); + } + $im->filledRectangle($x1, $y1, $x2, $y2, $color, 'rx="2" ry="2"'); + + my $chars = int( ($x2 - $x1) / ($fontsize * $fontwidth)); + my $text = ""; + if ($chars >= 3) { # room for one char plus two dots + $func =~ s/_\[[kwij]\]$//; # strip any annotation + $text = substr $func, 0, $chars; + substr($text, -2, 2) = ".." if $chars < length $func; + $text =~ s/&/&/g; + $text =~ s//>/g; + } + $im->stringTTF(undef, $x1 + 3, 3 + ($y1 + $y2) / 2, $text); + + $im->group_end($nameattr); +} +$im->group_end(); + +print $im->svg; + +if ($palette) { + write_palette(); +} + +# vim: ts=8 sts=8 sw=8 noexpandtab diff --git a/barretenberg/cpp/scripts/llvm_xray_stack_flame_corrector.js b/barretenberg/cpp/scripts/llvm_xray_stack_flame_corrector.js new file mode 100644 index 00000000000..97323d4cb63 --- /dev/null +++ b/barretenberg/cpp/scripts/llvm_xray_stack_flame_corrector.js @@ -0,0 +1,183 @@ +// Corrects LLVM-XRAY stack traces to properly line up. +// Otherwise, there is a weird offset in each stack level that does not correspond to any function call. +// In the public domain. 
+// Conversion of public domain https://github.com/DerickEddington/corrector_of_llvm_xray_stack_flame +class Record { + constructor(node_path = [], attribute = BigInt(0)) { + this.node_path = node_path; + this.attribute = attribute; + } + + static fromString(line) { + const components = line.split(";"); + if (components.length >= 2) { + const attribute = BigInt(components.pop().trim()); + return new Record(components, attribute); + } else { + throw new Error("invalid flame format line"); + } + } + + toString() { + // To be reversed in-place + const np = this.node_path.map((x) => x); + return `${np.reverse().join(";")}; ${this.attribute}`; + } +} + +const Kind = { + LEAF: "Leaf", + BRANCH: "Branch", +}; + +class Node { + constructor() { + this.kind = Kind.LEAF; + this.attribute = null; + this.children = new Map(); + } + + correctForChild(child) { + if (this.attribute !== null) { + this.attribute -= child.attribute; + } + } + + child(name) { + if (this.kind === Kind.LEAF) { + this.kind = Kind.BRANCH; + this.children = new Map(); + return this.child(name); + } else { + if (!this.children.has(name)) { + this.children.set(name, new Node()); + } + return this.children.get(name); + } + } + + forEachChild(func) { + for (let child of this.children.values()) { + func(this, child); + } + } +} + +class Tree { + constructor() { + this.roots = new Node(); + this.original_order = []; + } +} + +class BadTree extends Tree { + constructor() { + super(); + } + + static fromIterator(records) { + let tree = new BadTree(); + for (let record of records) { + tree.extend(record); + } + return tree; + } + + extend(record) { + let parent = this.roots; + const [lastComponent, ...pathPrefix] = record.node_path.reverse(); + for (let component of pathPrefix.reverse()) { + parent = parent.child(component); + } + const lastNode = parent.child(lastComponent); + if (lastNode.attribute === null) { + lastNode.attribute = record.attribute; + this.original_order.push({ record, node: lastNode }); + } 
else { + lastNode.attribute += record.attribute; + } + } + + correct() { + const recur = (parent, child) => { + parent.correctForChild(child); + child.forEachChild(recur); + }; + this.roots.forEachChild((_, root) => root.forEachChild(recur)); + return new GoodTree(this); + } +} + +class GoodTree extends Tree { + constructor(tree) { + super(); + this.roots = tree.roots; + this.original_order = tree.original_order; + } + + *iter() { + for (let ordRecord of this.original_order) { + const { record, node } = ordRecord; + const originalNodePath = record.node_path; + const possiblyCorrectedAttribute = node.attribute; + yield new Record(originalNodePath, possiblyCorrectedAttribute); + } + } + + async dump() { + let output = []; + for (let record of this.iter()) { + output.push(`${record.toString()}\n`); + } + return output; + } +} + +async function correctStackData(input) { + const inputRecords = input.map((line) => Record.fromString(line)); + + const badTree = BadTree.fromIterator(inputRecords); + const goodTree = badTree.correct(); + return await goodTree.dump(); +} + +async function test() { + const result = await correctStackData([ + "thread1;main; 5925054742", + "thread1;main;f2; 5925051360", + "thread1;main;f2;busy; 5925047168", + "thread1;main; 5941982261", + "thread1;main;f1; 5941978880", + "thread1;main;f1;busy; 5941971904", + "thread1;main; 5930717973", + "thread1;main;busy; 5930714592", + ]); + const expected = [ + "thread1;main; 10144\n", + "thread1;main;f2; 4192\n", + "thread1;main;f2;busy; 5925047168\n", + "thread1;main;f1; 6976\n", + "thread1;main;f1;busy; 5941971904\n", + "thread1;main;busy; 5930714592\n", + ]; + if (JSON.stringify(result) !== JSON.stringify(expected)) { + throw new Error("test fail"); + } + console.log("test pass"); +} + +async function main() { + // Read standard input + const inputLines = await new Promise((resolve) => { + let data = ""; + process.stdin + .on("data", (chunk) => (data += chunk)) + .on("end", () => 
resolve(data.split("\n").filter((line) => line))); + }); + for (const line of await correctStackData(inputLines)) { + process.stdout.write(line); + } +} + +// test(); +main(); diff --git a/barretenberg/cpp/src/barretenberg/bb/main.cpp b/barretenberg/cpp/src/barretenberg/bb/main.cpp index 81c4a2bfb49..dfcd63d8d3c 100644 --- a/barretenberg/cpp/src/barretenberg/bb/main.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/main.cpp @@ -53,11 +53,11 @@ acir_format::acir_format get_constraint_system(std::string const& bytecode_path) */ bool proveAndVerify(const std::string& bytecodePath, const std::string& witnessPath, bool recursive) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); auto witness = get_witness(witnessPath); - auto proof = acir_composer->create_proof(srs::get_crs_factory(), constraint_system, witness, recursive); - auto verified = acir_composer->verify_proof(proof, recursive); + auto proof = acir_composer.create_proof(constraint_system, witness, recursive); + auto verified = acir_composer.verify_proof(proof, recursive); vinfo("verified: ", verified); return verified; @@ -80,10 +80,10 @@ void prove(const std::string& bytecodePath, bool recursive, const std::string& outputPath) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); auto witness = get_witness(witnessPath); - auto proof = acir_composer->create_proof(srs::get_crs_factory(), constraint_system, witness, recursive); + auto proof = acir_composer.create_proof(constraint_system, witness, recursive); if (outputPath == "-") { writeRawBytesToStdout(proof); @@ -104,10 +104,10 @@ void prove(const std::string& bytecodePath, */ void gateCount(const std::string& bytecodePath) { - auto 
acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); - acir_composer->create_circuit(constraint_system); - auto gate_count = acir_composer->get_total_circuit_size(); + acir_composer.create_circuit(constraint_system); + auto gate_count = acir_composer.get_total_circuit_size(); writeUint64AsRawBytesToStdout(static_cast(gate_count)); vinfo("gate count: ", gate_count); @@ -131,10 +131,10 @@ void gateCount(const std::string& bytecodePath) */ bool verify(const std::string& proof_path, bool recursive, const std::string& vk_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto vk_data = from_buffer(read_file(vk_path)); - acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); - auto verified = acir_composer->verify_proof(read_file(proof_path), recursive); + acir_composer.load_verification_key(std::move(vk_data)); + auto verified = acir_composer.verify_proof(read_file(proof_path), recursive); vinfo("verified: ", verified); @@ -153,10 +153,10 @@ bool verify(const std::string& proof_path, bool recursive, const std::string& vk */ void writeVk(const std::string& bytecodePath, const std::string& outputPath) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); - acir_composer->init_proving_key(srs::get_crs_factory(), constraint_system); - auto vk = acir_composer->init_verification_key(); + acir_composer.init_proving_key(constraint_system); + auto vk = acir_composer.init_verification_key(); auto serialized_vk = to_buffer(*vk); if (outputPath == "-") { writeRawBytesToStdout(serialized_vk); @@ -182,10 +182,10 @@ void 
writeVk(const std::string& bytecodePath, const std::string& outputPath) */ void contract(const std::string& output_path, const std::string& vk_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto vk_data = from_buffer(read_file(vk_path)); - acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); - auto contract = acir_composer->get_solidity_verifier(); + acir_composer.load_verification_key(std::move(vk_data)); + auto contract = acir_composer.get_solidity_verifier(); if (output_path == "-") { writeStringToStdout(contract); @@ -223,9 +223,9 @@ void contract(const std::string& output_path, const std::string& vk_path) */ void proofAsFields(const std::string& proof_path, std::string const& vk_path, const std::string& output_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto vk_data = from_buffer(read_file(vk_path)); - auto data = acir_composer->serialize_proof_into_fields(read_file(proof_path), vk_data.num_public_inputs); + auto data = acir_composer.serialize_proof_into_fields(read_file(proof_path), vk_data.num_public_inputs); auto json = format("[", join(map(data, [](auto fr) { return format("\"", fr, "\""); })), "]"); if (output_path == "-") { @@ -252,10 +252,10 @@ void proofAsFields(const std::string& proof_path, std::string const& vk_path, co */ void vkAsFields(const std::string& vk_path, const std::string& output_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + acir_proofs::AcirComposer acir_composer(MAX_CIRCUIT_SIZE, verbose); auto vk_data = from_buffer(read_file(vk_path)); - acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); - auto data = acir_composer->serialize_verification_key_into_fields(); + 
acir_composer.load_verification_key(std::move(vk_data)); + auto data = acir_composer.serialize_verification_key_into_fields(); // We need to move vk_hash to the front... std::rotate(data.begin(), data.end() - 1, data.end()); diff --git a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt index 16f375379bb..d851d5af285 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt @@ -2,4 +2,4 @@ add_subdirectory(decrypt_bench) add_subdirectory(pippenger_bench) add_subdirectory(plonk_bench) add_subdirectory(honk_bench) -add_subdirectory(relations_bench) \ No newline at end of file +add_subdirectory(relations_bench) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt index 3234668be52..38b08abcb34 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt @@ -19,4 +19,17 @@ foreach(BENCHMARK_SOURCE ${BENCHMARK_SOURCES}) add_executable(${BENCHMARK_NAME}_bench main.bench.cpp ${BENCHMARK_SOURCE} benchmark_utilities.hpp) target_link_libraries(${BENCHMARK_NAME}_bench ${LINKED_LIBRARIES}) add_custom_target(run_${BENCHMARK_NAME} COMMAND ${BENCHMARK_NAME} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -endforeach() \ No newline at end of file +endforeach() + +add_executable( + honk_bench_main_simple + main.simple.cpp +) + +target_link_libraries( + honk_bench_main_simple + PRIVATE + stdlib_sha256 + stdlib_keccak + stdlib_merkle_tree +) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp index 21e3513fd84..288f9605c56 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp +++ 
b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp @@ -1,3 +1,4 @@ +#pragma once #include #include "barretenberg/honk/composer/ultra_composer.hpp" @@ -62,9 +63,6 @@ template void generate_sha256_test_circuit(Builder& builder, { std::string in; in.resize(32); - for (size_t i = 0; i < 32; ++i) { - in[i] = 0; - } proof_system::plonk::stdlib::packed_byte_array input(&builder, in); for (size_t i = 0; i < num_iterations; i++) { input = proof_system::plonk::stdlib::sha256(input); @@ -244,4 +242,4 @@ void construct_proof_with_specified_num_iterations(State& state, } } -} // namespace bench_utils \ No newline at end of file +} // namespace bench_utils diff --git a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/main.simple.cpp b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/main.simple.cpp new file mode 100644 index 00000000000..f33faf554d1 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/main.simple.cpp @@ -0,0 +1,61 @@ +/* Entry point for profiling with e.g. LLVM xray. + * This provides a simple entrypoint to bypass artifacts with + * TODO(AD): Consider if we can directly profile the bench executables. 
+ */ +#include +#include +#include + +#include "barretenberg/honk/composer/ultra_composer.hpp" +#include "barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp" +#include "barretenberg/proof_system/types/circuit_type.hpp" +#include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" +#include "barretenberg/stdlib/hash/keccak/keccak.hpp" +#include "barretenberg/stdlib/hash/sha256/sha256.hpp" +#include "barretenberg/stdlib/merkle_tree/membership.hpp" +#include "barretenberg/stdlib/merkle_tree/memory_store.hpp" +#include "barretenberg/stdlib/merkle_tree/memory_tree.hpp" +#include "barretenberg/stdlib/merkle_tree/merkle_tree.hpp" +#include "barretenberg/stdlib/primitives/bool/bool.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" +#include "barretenberg/stdlib/primitives/field/field.hpp" +#include "barretenberg/stdlib/primitives/packed_byte_array/packed_byte_array.hpp" +#include "barretenberg/stdlib/primitives/witness/witness.hpp" + +using namespace proof_system::plonk; + +using UltraBuilder = proof_system::UltraCircuitBuilder; +using UltraHonk = proof_system::honk::UltraComposer; + +template void generate_sha256_test_circuit(Builder& builder, size_t num_iterations) +{ + std::string in; + in.resize(32); + proof_system::plonk::stdlib::packed_byte_array input(&builder, in); + for (size_t i = 0; i < num_iterations; i++) { + input = proof_system::plonk::stdlib::sha256(input); + } +} + +/** + * @brief Benchmark: Construction of a Ultra Honk proof for a circuit determined by the provided circuit function + */ +void construct_proof_ultra() noexcept +{ + barretenberg::srs::init_crs_factory("../srs_db/ignition"); + // Constuct circuit and prover; don't include this part in measurement + auto builder = typename UltraHonk::CircuitBuilder(); + generate_sha256_test_circuit(builder, 1); + + auto composer = UltraHonk(); + auto instance = composer.create_instance(builder); + auto ext_prover = composer.create_prover(instance); + for (size_t i = 0; i < 10; 
i++) { + auto proof = ext_prover.construct_proof(); + } +} + +int main() +{ + construct_proof_ultra(); +} diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp index 29463a06db7..da774efecd3 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp @@ -31,9 +31,7 @@ void AcirComposer::create_circuit(acir_format::acir_format& constraint_system) size_hint_ = circuit_subgroup_size_; } -void AcirComposer::init_proving_key( - std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system) +void AcirComposer::init_proving_key(acir_format::acir_format& constraint_system) { vinfo("building circuit... ", size_hint_); builder_ = acir_format::Builder(size_hint_); @@ -47,16 +45,14 @@ void AcirComposer::init_proving_key( total_circuit_size_ = builder_.get_total_circuit_size(); circuit_subgroup_size_ = builder_.get_circuit_subgroup_size(total_circuit_size_); - composer_ = acir_format::Composer(crs_factory); + composer_ = acir_format::Composer(); vinfo("computing proving key..."); proving_key_ = composer_.compute_proving_key(builder_); } -std::vector AcirComposer::create_proof( - std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system, - acir_format::WitnessVector& witness, - bool is_recursive) +std::vector AcirComposer::create_proof(acir_format::acir_format& constraint_system, + acir_format::WitnessVector& witness, + bool is_recursive) { // Release prior memory first. composer_ = acir_format::Composer(/*p_key=*/0, /*v_key=*/0); @@ -67,12 +63,10 @@ std::vector AcirComposer::create_proof( composer_ = [&]() { if (proving_key_) { - auto composer = acir_format::Composer(proving_key_, verification_key_); - // You can't produce the verification key unless you manually set the crs. Which seems like a bug. 
- composer_.crs_factory_ = crs_factory; + auto composer = acir_format::Composer(proving_key_, nullptr); return composer; } else { - return acir_format::Composer(crs_factory); + return acir_format::Composer(); } }(); if (!proving_key_) { @@ -108,12 +102,10 @@ std::shared_ptr AcirComposer::init_verifi return verification_key_; } -void AcirComposer::load_verification_key( - std::shared_ptr> const& crs_factory, - proof_system::plonk::verification_key_data&& data) +void AcirComposer::load_verification_key(proof_system::plonk::verification_key_data&& data) { - verification_key_ = - std::make_shared(std::move(data), crs_factory->get_verifier_crs()); + verification_key_ = std::make_shared( + std::move(data), srs::get_crs_factory()->get_verifier_crs()); composer_ = acir_format::Composer(proving_key_, verification_key_); } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp index 25814e78d91..f4e125478ba 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp @@ -14,18 +14,13 @@ class AcirComposer { void create_circuit(acir_format::acir_format& constraint_system); - void init_proving_key(std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system); - - std::vector create_proof( - std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system, - acir_format::WitnessVector& witness, - bool is_recursive); - - void load_verification_key( - std::shared_ptr> const& crs_factory, - proof_system::plonk::verification_key_data&& data); + void init_proving_key(acir_format::acir_format& constraint_system); + + std::vector create_proof(acir_format::acir_format& constraint_system, + acir_format::WitnessVector& witness, + bool is_recursive); + + void load_verification_key(proof_system::plonk::verification_key_data&& data); std::shared_ptr 
init_verification_key(); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp index 1af145e2978..0bdfbb519d2 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp @@ -35,7 +35,7 @@ WASM_EXPORT void acir_init_proving_key(in_ptr acir_composer_ptr, uint8_t const* auto acir_composer = reinterpret_cast(*acir_composer_ptr); auto constraint_system = acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)); - acir_composer->init_proving_key(barretenberg::srs::get_crs_factory(), constraint_system); + acir_composer->init_proving_key(constraint_system); } WASM_EXPORT void acir_create_proof(in_ptr acir_composer_ptr, @@ -48,8 +48,7 @@ WASM_EXPORT void acir_create_proof(in_ptr acir_composer_ptr, auto constraint_system = acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)); auto witness = acir_format::witness_buf_to_witness_data(from_buffer>(witness_vec)); - auto proof_data = - acir_composer->create_proof(barretenberg::srs::get_crs_factory(), constraint_system, witness, *is_recursive); + auto proof_data = acir_composer->create_proof(constraint_system, witness, *is_recursive); *out = to_heap_buffer(proof_data); } @@ -57,7 +56,7 @@ WASM_EXPORT void acir_load_verification_key(in_ptr acir_composer_ptr, uint8_t co { auto acir_composer = reinterpret_cast(*acir_composer_ptr); auto vk_data = from_buffer(vk_buf); - acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); + acir_composer->load_verification_key(std::move(vk_data)); } WASM_EXPORT void acir_init_verification_key(in_ptr acir_composer_ptr) diff --git a/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp b/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp index 23129857262..53f373658fe 100644 --- a/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp +++ 
b/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp @@ -6,7 +6,7 @@ using namespace proof_system::plonk::stdlib::types; WASM_EXPORT void examples_simple_create_and_verify_proof(bool* valid) { - auto ptrs = examples::simple::create_builder_and_composer(barretenberg::srs::get_crs_factory()); + auto ptrs = examples::simple::create_builder_and_composer(); auto proof = examples::simple::create_proof(ptrs); *valid = examples::simple::verify_proof(ptrs, proof); examples::simple::delete_builder_and_composer(ptrs); diff --git a/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp b/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp index 19af6aa0d69..ad35fa0ee95 100644 --- a/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp +++ b/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp @@ -19,8 +19,7 @@ void build_circuit(Builder& builder) } } -BuilderComposerPtrs create_builder_and_composer( - std::shared_ptr> const& crs_factory) +BuilderComposerPtrs create_builder_and_composer() { // WARNING: Size hint is essential to perform 512k circuits! 
auto builder = std::make_unique(CIRCUIT_SIZE); @@ -36,7 +35,7 @@ BuilderComposerPtrs create_builder_and_composer( info("composer gates: ", builder->get_num_gates()); info("computing proving key..."); - auto composer = std::make_unique(crs_factory); + auto composer = std::make_unique(); auto pk = composer->compute_proving_key(*builder); return { builder.release(), composer.release() }; diff --git a/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp b/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp index 0932708bf21..264d328d2d2 100644 --- a/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp +++ b/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp @@ -12,8 +12,7 @@ struct BuilderComposerPtrs { Composer* composer; }; -BuilderComposerPtrs create_builder_and_composer( - std::shared_ptr> const& crs_factory); +BuilderComposerPtrs create_builder_and_composer(); proof create_proof(BuilderComposerPtrs pair); diff --git a/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp b/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp index a43c3de4503..17689497a75 100644 --- a/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp +++ b/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp @@ -8,8 +8,8 @@ namespace examples::simple { TEST(examples_simple, create_proof) { auto srs_path = std::filesystem::absolute("../srs_db/ignition"); - auto crs_factory = std::make_shared>(srs_path); - auto ptrs = create_builder_and_composer(crs_factory); + srs::init_crs_factory(srs_path); + auto ptrs = create_builder_and_composer(); auto proof = create_proof(ptrs); bool valid = verify_proof(ptrs, proof); delete_builder_and_composer(ptrs); diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp index e5d1995fb89..2012cb1547a 100644 --- 
a/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp @@ -101,13 +101,56 @@ class FullGoblinComposerTests : public ::testing::Test { // Store the commitment data for use by the prover of the next circuit op_queue->set_commitment_data(op_queue_commitments); } + + /** + * @brief Construct and a verify a Honk proof + * + */ + bool construct_and_verify_honk_proof(auto& composer, auto& builder) + { + auto instance = composer.create_instance(builder); + auto prover = composer.create_prover(instance); + auto verifier = composer.create_verifier(instance); + auto proof = prover.construct_proof(); + bool verified = verifier.verify_proof(proof); + + return verified; + } + + /** + * @brief Construct and verify a Goblin ECC op queue merge proof + * + */ + bool construct_and_verify_merge_proof(auto& composer, auto& op_queue) + { + auto merge_prover = composer.create_merge_prover(op_queue); + auto merge_verifier = composer.create_merge_verifier(10); + auto merge_proof = merge_prover.construct_proof(); + bool verified = merge_verifier.verify_proof(merge_proof); + + return verified; + } + + /** + * @brief Construct and verify a Goblin ECC op queue merge proof + * + */ + bool construct_and_verify_eccvm_proof(auto& composer, auto& builder) + { + auto prover = composer.create_prover(builder); + auto proof = prover.construct_proof(); + auto verifier = composer.create_verifier(builder); + bool verified = verifier.verify_proof(proof); + + return verified; + } }; /** * @brief Test proof construction/verification for a circuit with ECC op gates, public inputs, and basic arithmetic * gates * @note We simulate op queue interactions with a previous circuit so the actual circuit under test utilizes an op queue - * with non-empty 'previous' data. This avoid complications with zero-commitments etc. + * with non-empty 'previous' data. 
This avoids complications with zero-commitments etc. * */ TEST_F(FullGoblinComposerTests, SimpleCircuit) @@ -124,13 +167,16 @@ TEST_F(FullGoblinComposerTests, SimpleCircuit) generate_test_circuit(builder); + // The same composer is used to manage Honk and Merge prover/verifier auto composer = GoblinUltraComposer(); - auto instance = composer.create_instance(builder); - auto prover = composer.create_prover(instance); - auto verifier = composer.create_verifier(instance); - auto proof = prover.construct_proof(); - bool verified = verifier.verify_proof(proof); - EXPECT_EQ(verified, true); + + // Construct and verify Ultra Goblin Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify op queue merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } // Construct an ECCVM circuit then generate and verify its proof @@ -138,15 +184,10 @@ TEST_F(FullGoblinComposerTests, SimpleCircuit) // Instantiate an ECCVM builder with the vm ops stored in the op queue auto builder = ECCVMBuilder(op_queue->raw_ops); - // // Can fiddle with one of the operands to trigger a failure - // builder.vm_operations[0].z1 *= 2; - + // Construct and verify ECCVM proof auto composer = ECCVMComposer(); - auto prover = composer.create_prover(builder); - auto proof = prover.construct_proof(); - auto verifier = composer.create_verifier(builder); - bool verified = verifier.verify_proof(proof); - ASSERT_TRUE(verified); + auto eccvm_verified = construct_and_verify_eccvm_proof(composer, builder); + EXPECT_TRUE(eccvm_verified); } } @@ -168,13 +209,16 @@ TEST_F(FullGoblinComposerTests, SimpleCircuitFailureCase) generate_test_circuit(builder); + // The same composer is used to manage Honk and Merge prover/verifier auto composer = GoblinUltraComposer(); - auto instance = composer.create_instance(builder); - auto prover = composer.create_prover(instance); - auto 
verifier = composer.create_verifier(instance); - auto proof = prover.construct_proof(); - bool verified = verifier.verify_proof(proof); - EXPECT_EQ(verified, true); + + // Construct and verify Ultra Goblin Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify op queue merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } // Construct an ECCVM circuit then generate and verify its proof @@ -185,12 +229,10 @@ TEST_F(FullGoblinComposerTests, SimpleCircuitFailureCase) // Fiddle with one of the operands to trigger a failure builder.vm_operations[0].z1 += 1; + // Construct and verify ECCVM proof auto composer = ECCVMComposer(); - auto prover = composer.create_prover(builder); - auto proof = prover.construct_proof(); - auto verifier = composer.create_verifier(builder); - bool verified = verifier.verify_proof(proof); - EXPECT_EQ(verified, false); + auto eccvm_verified = construct_and_verify_eccvm_proof(composer, builder); + EXPECT_FALSE(eccvm_verified); } } diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp index 5d33532eb1b..ee13dff5b1e 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp @@ -56,18 +56,11 @@ class GoblinUltraHonkComposerTests : public ::testing::Test { } /** - * @brief Construct a goblin ultra circuit then generate a verify its proof + * @brief Construct and a verify a Honk proof * - * @param op_queue - * @return auto */ - bool construct_test_circuit_then_generate_and_verify_proof(auto& op_queue) + bool construct_and_verify_honk_proof(auto& composer, auto& builder) { - auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); - - 
generate_test_circuit(builder); - - auto composer = GoblinUltraComposer(); auto instance = composer.create_instance(builder); auto prover = composer.create_prover(instance); auto verifier = composer.create_verifier(instance); @@ -76,6 +69,20 @@ class GoblinUltraHonkComposerTests : public ::testing::Test { return verified; } + + /** + * @brief Construct and verify a Goblin ECC op queue merge proof + * + */ + bool construct_and_verify_merge_proof(auto& composer, auto& op_queue) + { + auto merge_prover = composer.create_merge_prover(op_queue); + auto merge_verifier = composer.create_merge_verifier(10); + auto merge_proof = merge_prover.construct_proof(); + bool verified = merge_verifier.verify_proof(merge_proof); + + return verified; + } }; /** @@ -92,18 +99,27 @@ TEST_F(GoblinUltraHonkComposerTests, SingleCircuit) // Add mock data to op queue to simulate interaction with a previous circuit op_queue->populate_with_mock_initital_data(); - // Construct a test circuit then generate and verify its proof - auto verified = construct_test_circuit_then_generate_and_verify_proof(op_queue); + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); - EXPECT_EQ(verified, true); + auto composer = GoblinUltraComposer(); + + // Construct and verify Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify Goblin ECC op queue Merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } /** - * @brief Test proof construction/verification for a circuit with ECC op gates, public inputs, and basic arithmetic - * gates + * @brief Test Merge proof construction/verification for multiple circuits with ECC op gates, public inputs, and + * basic arithmetic gates * */ -TEST_F(GoblinUltraHonkComposerTests, MultipleCircuits) +TEST_F(GoblinUltraHonkComposerTests, MultipleCircuitsMergeOnly) { // Instantiate 
EccOpQueue. This will be shared across all circuits in the series auto op_queue = std::make_shared(); @@ -114,7 +130,75 @@ TEST_F(GoblinUltraHonkComposerTests, MultipleCircuits) // Construct multiple test circuits that share an ECC op queue. Generate and verify a proof for each. size_t NUM_CIRCUITS = 3; for (size_t i = 0; i < NUM_CIRCUITS; ++i) { - construct_test_circuit_then_generate_and_verify_proof(op_queue); + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); + + auto composer = GoblinUltraComposer(); + + // Construct and verify Goblin ECC op queue Merge its proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); + } +} + +/** + * @brief Test Honk proof construction/verification for multiple circuits with ECC op gates, public inputs, and + * basic arithmetic gates + * + */ +TEST_F(GoblinUltraHonkComposerTests, MultipleCircuitsHonkOnly) +{ + // Instantiate EccOpQueue. This will be shared across all circuits in the series + auto op_queue = std::make_shared(); + + // Add mock data to op queue to simulate interaction with a previous circuit + op_queue->populate_with_mock_initital_data(); + + // Construct multiple test circuits that share an ECC op queue. Generate and verify a proof for each. + size_t NUM_CIRCUITS = 3; + for (size_t i = 0; i < NUM_CIRCUITS; ++i) { + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); + + auto composer = GoblinUltraComposer(); + + // Construct and verify Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + } +} + +/** + * @brief Test Honk and Merge proof construction/verification for multiple circuits with ECC op gates, public inputs, + * and basic arithmetic gates + * + */ +TEST_F(GoblinUltraHonkComposerTests, MultipleCircuitsHonkAndMerge) +{ + // Instantiate EccOpQueue. 
This will be shared across all circuits in the series + auto op_queue = std::make_shared(); + + // Add mock data to op queue to simulate interaction with a previous circuit + op_queue->populate_with_mock_initital_data(); + + // Construct multiple test circuits that share an ECC op queue. Generate and verify a proof for each. + size_t NUM_CIRCUITS = 3; + for (size_t i = 0; i < NUM_CIRCUITS; ++i) { + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); + + auto composer = GoblinUltraComposer(); + + // Construct and verify Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify Goblin ECC op queue Merge its proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } // Compute the commitments to the aggregate op queue directly and check that they match those that were computed diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp index 8cf4c27c8aa..7451cf23bb8 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp @@ -1,5 +1,7 @@ #pragma once #include "barretenberg/honk/instance/prover_instance.hpp" +#include "barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp" +#include "barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp" #include "barretenberg/honk/proof_system/protogalaxy_prover.hpp" #include "barretenberg/honk/proof_system/protogalaxy_verifier.hpp" #include "barretenberg/honk/proof_system/ultra_prover.hpp" @@ -72,6 +74,34 @@ template class UltraComposer_ { UltraProver_ create_prover(std::shared_ptr); UltraVerifier_ create_verifier(std::shared_ptr); + /** + * @brief Create Prover for Goblin ECC op queue merge protocol + * + * @param op_queue + * @return 
MergeProver_ + */ + MergeProver_ create_merge_prover(std::shared_ptr op_queue) + { + // Store the previous aggregate op queue size and update the current one + op_queue->set_size_data(); + // Merge requires a commitment key with size equal to that of the current op queue transcript T_i since the + // shift of the current contribution t_i will be of degree equal to deg(T_i) + auto commitment_key = compute_commitment_key(op_queue->get_current_size()); + return MergeProver_(commitment_key, op_queue); + } + + /** + * @brief Create Verifier for Goblin ECC op queue merge protocol + * + * @param size Size of commitment key required to commit to shifted op queue contribution t_i + * @return MergeVerifier_ + */ + MergeVerifier_ create_merge_verifier(size_t size) + { + auto pcs_verification_key = std::make_unique(size, crs_factory_); + return MergeVerifier_(std::move(pcs_verification_key)); + } + ProtoGalaxyProver_ create_folding_prover(std::vector> instances) { ProverInstances insts(instances); diff --git a/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp b/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp index 006bba75d6e..95b79c204c2 100644 --- a/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp @@ -288,8 +288,6 @@ class GoblinUltra { size_t num_ecc_op_gates; // needed to determine public input offset - std::shared_ptr op_queue; - // The plookup wires that store plookup read data. 
std::array get_table_column_wires() { return { w_l, w_r, w_o }; }; }; diff --git a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp index 96154837399..76b3e5eb949 100644 --- a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp @@ -249,7 +249,6 @@ std::shared_ptr ProverInstance_::compute_pr if constexpr (IsGoblinFlavor) { proving_key->num_ecc_op_gates = num_ecc_op_gates; - proving_key->op_queue = circuit.op_queue; } return proving_key; diff --git a/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp b/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp index 9daeeb70746..05f405494f8 100644 --- a/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp @@ -19,6 +19,20 @@ template class OpeningPair { bool operator==(const OpeningPair& other) const = default; }; +/** + * @brief Polynomial p and an opening pair (r,v) such that p(r) = v + * + * @tparam Params for the given commitment scheme + */ +template class ProverOpeningClaim { + using Fr = typename Curve::ScalarField; + using Polynomial = barretenberg::Polynomial; + + public: + Polynomial polynomial; // p + OpeningPair opening_pair; // (challenge r, evaluation v = p(r)) +}; + /** * @brief Unverified claim (C,r,v) for some witness polynomial p(X) such that * - C = Commit(p(X)) diff --git a/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.hpp b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.hpp new file mode 100644 index 00000000000..d9dda108c65 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.hpp @@ -0,0 +1,366 @@ +#pragma once +#include "barretenberg/polynomials/polynomial.hpp" + +/** + * @brief + * + */ +namespace proof_system::honk::pcs::zeromorph { + +/** + * @brief Prover for ZeroMorph multilinear PCS + * + * @tparam 
Curve + */ +template class ZeroMorphProver_ { + using Fr = typename Curve::ScalarField; + using Commitment = typename Curve::AffineElement; + using Polynomial = barretenberg::Polynomial; + + // TODO(#742): Set this N_max to be the number of G1 elements in the mocked zeromorph SRS once it's in place. (Then, + // eventually, set it based on the real SRS). + static const size_t N_max = 1 << 10; + + public: + /** + * @brief Compute multivariate quotients q_k(X_0, ..., X_{k-1}) for f(X_0, ..., X_{d-1}) + * @details Given multilinear polynomial f = f(X_0, ..., X_{d-1}) for which f(u) = v, compute q_k such that: + * + * f(X_0, ..., X_{d-1}) - v = \sum_{k=0}^{d-1} (X_k - u_k)q_k(X_0, ..., X_{k-1}) + * + * The polynomials q_k can be computed explicitly as the difference of the partial evaluation of f in the last + * (n - k) variables at, respectively, u'' = (u_k + 1, u_{k+1}, ..., u_{n-1}) and u' = (u_k, ..., u_{n-1}). I.e. + * + * q_k(X_0, ..., X_{k-1}) = f(X_0,...,X_{k-1}, u'') - f(X_0,...,X_{k-1}, u') + * + * @note In practice, 2^d is equal to the circuit size + * + * TODO(#739): This method has been designed for clarity at the expense of efficiency. Implement the more efficient + * algorithm detailed in the latest versions of the ZeroMorph paper. 
+ * @param polynomial Multilinear polynomial f(X_0, ..., X_{d-1}) + * @param u_challenge Multivariate challenge u = (u_0, ..., u_{d-1}) + * @return std::vector The quotients q_k + */ + static std::vector compute_multilinear_quotients(Polynomial polynomial, std::span u_challenge) + { + size_t log_poly_size = numeric::get_msb(polynomial.size()); + // The size of the multilinear challenge must equal the log of the polynomial size + ASSERT(log_poly_size == u_challenge.size()); + + // Define the vector of quotients q_k, k = 0, ..., log_n-1 + std::vector quotients; + for (size_t k = 0; k < log_poly_size; ++k) { + size_t size = 1 << k; + quotients.emplace_back(Polynomial(size)); // degree 2^k - 1 + } + + // Compute the q_k in reverse order, i.e. q_{n-1}, ..., q_0 + for (size_t k = 0; k < log_poly_size; ++k) { + // Define partial evaluation point u' = (u_k, ..., u_{n-1}) + auto evaluation_point_size = static_cast(k + 1); + std::vector u_partial(u_challenge.end() - evaluation_point_size, u_challenge.end()); + + // Compute f' = f(X_0,...,X_{k-1}, u') + auto f_1 = polynomial.partial_evaluate_mle(u_partial); + + // Increment first element to get altered partial evaluation point u'' = (u_k + 1, u_{k+1}, ..., u_{n-1}) + u_partial[0] += 1; + + // Compute f'' = f(X_0,...,X_{k-1}, u'') + auto f_2 = polynomial.partial_evaluate_mle(u_partial); + + // Compute q_k = f''(X_0,...,X_{k-1}) - f'(X_0,...,X_{k-1}) + auto q_k = f_2; + q_k -= f_1; + + quotients[log_poly_size - k - 1] = q_k; + } + + return quotients; + } + + /** + * @brief Construct batched, lifted-degree univariate quotient \hat{q} = \sum_k y^k * X^{N - d_k - 1} * q_k + * @details The purpose of the batched lifted-degree quotient is to reduce the individual degree checks + * deg(q_k) <= 2^k - 1 to a single degree check on \hat{q}. This is done by first shifting each of the q_k to the + * right (i.e. 
multiplying by an appropriate power of X) so that each is degree N-1, then batching them all together + * using powers of the provided challenge. Note: In practice, we do not actually compute the shifted q_k, we simply + * accumulate them into \hat{q} at the appropriate offset. + * + * @param quotients Polynomials q_k, interpreted as univariates; deg(q_k) = 2^k - 1 + * @param N + * @return Polynomial + */ + static Polynomial compute_batched_lifted_degree_quotient(std::vector& quotients, + Fr y_challenge, + size_t N) + { + // Batched lifted degree quotient polynomial + auto result = Polynomial(N); + + // Compute \hat{q} = \sum_k y^k * X^{N - d_k - 1} * q_k + size_t k = 0; + auto scalar = Fr(1); // y^k + for (auto& quotient : quotients) { + // Rather than explicitly computing the shifts of q_k by N - d_k - 1 (i.e. multiplying q_k by X^{N - d_k - + // 1}) then accumulating them, we simply accumulate y^k*q_k into \hat{q} at the index offset N - d_k - 1 + auto deg_k = static_cast((1 << k) - 1); + size_t offset = N - deg_k - 1; + for (size_t idx = 0; idx < deg_k + 1; ++idx) { + result[offset + idx] += scalar * quotient[idx]; + } + scalar *= y_challenge; // update batching scalar y^k + k++; + } + + return result; + } + + /** + * @brief Compute partially evaluated degree check polynomial \zeta_x = q - \sum_k y^k * x^{N - d_k - 1} * q_k + * @details Compute \zeta_x, where + * + * \zeta_x = q - \sum_k y^k * x^{N - d_k - 1} * q_k + * + * @param batched_quotient + * @param quotients + * @param y_challenge + * @param x_challenge + * @return Polynomial Degree check polynomial \zeta_x such that \zeta_x(x) = 0 + */ + static Polynomial compute_partially_evaluated_degree_check_polynomial(Polynomial& batched_quotient, + std::vector& quotients, + Fr y_challenge, + Fr x_challenge) + { + size_t N = batched_quotient.size(); + size_t log_N = quotients.size(); + + // Initialize partially evaluated degree check polynomial \zeta_x to \hat{q} + auto result = batched_quotient; + + auto y_power 
= Fr(1); // y^k + for (size_t k = 0; k < log_N; ++k) { + // Accumulate y^k * x^{N - d_k - 1} * q_k into \hat{q} + auto deg_k = static_cast((1 << k) - 1); + auto x_power = x_challenge.pow(N - deg_k - 1); // x^{N - d_k - 1} + + result.add_scaled(quotients[k], -y_power * x_power); + + y_power *= y_challenge; // update batching scalar y^k + } + + return result; + } + + /** + * @brief Compute partially evaluated zeromorph identity polynomial Z_x + * @details Compute Z_x, where + * + * Z_x = x * f_batched + g_batched - v * x * \Phi_n(x) + * - x * \sum_k (x^{2^k}\Phi_{n-k-1}(x^{2^{k-1}}) - u_k\Phi_{n-k}(x^{2^k})) * q_k + * + * where f_batched = \sum_{i=0}^{m-1}\alpha^i*f_i, g_batched = \sum_{i=0}^{l-1}\alpha^{m+i}*g_i + * + * @param input_polynomial + * @param quotients + * @param v_evaluation + * @param x_challenge + * @return Polynomial + */ + static Polynomial compute_partially_evaluated_zeromorph_identity_polynomial(Polynomial& f_batched, + Polynomial& g_batched, + std::vector& quotients, + Fr v_evaluation, + std::span u_challenge, + Fr x_challenge) + { + size_t N = f_batched.size(); + size_t log_N = quotients.size(); + + // Initialize Z_x with x * \sum_{i=0}^{m-1} f_i + \sum_{i=0}^{l-1} g_i + auto result = Polynomial(N); + result.add_scaled(f_batched, x_challenge); + result += g_batched; + + // Compute Z_x -= v * x * \Phi_n(x) + auto phi_numerator = x_challenge.pow(N) - 1; // x^N - 1 + auto phi_n_x = phi_numerator / (x_challenge - 1); + result[0] -= v_evaluation * x_challenge * phi_n_x; + + // Add contribution from q_k polynomials + auto x_power = x_challenge; // x^{2^k} + for (size_t k = 0; k < log_N; ++k) { + x_power = x_challenge.pow(1 << k); // x^{2^k} + + // \Phi_{n-k-1}(x^{2^{k + 1}}) + auto phi_term_1 = phi_numerator / (x_challenge.pow(1 << (k + 1)) - 1); + + // \Phi_{n-k}(x^{2^k}) + auto phi_term_2 = phi_numerator / (x_challenge.pow(1 << k) - 1); + + // x^{2^k} * \Phi_{n-k-1}(x^{2^{k+1}}) - u_k * \Phi_{n-k}(x^{2^k}) + auto scalar = x_power * phi_term_1 - 
u_challenge[k] * phi_term_2; + + scalar *= x_challenge; + scalar *= Fr(-1); + + result.add_scaled(quotients[k], scalar); + } + + return result; + } + + /** + * @brief Compute combined evaluation and degree-check quotient polynomial pi + * @details Compute univariate quotient pi, where + * + * pi = (q_\zeta + z*q_Z) X^{N_{max}-(N-1)}, with q_\zeta = \zeta_x/(X-x), q_Z = Z_x/(X-x) + * + * @param Z_x + * @param zeta_x + * @param x_challenge + * @param z_challenge + * @param N_max + * @return Polynomial + */ + static Polynomial compute_batched_evaluation_and_degree_check_quotient(Polynomial& zeta_x, + Polynomial& Z_x, + Fr x_challenge, + Fr z_challenge) + { + // We cannot commit to polynomials with size > N_max + size_t N = zeta_x.size(); + ASSERT(N <= N_max); + + // Compute q_{\zeta} and q_Z in place + zeta_x.factor_roots(x_challenge); + Z_x.factor_roots(x_challenge); + + // Compute batched quotient q_{\zeta} + z*q_Z + auto batched_quotient = zeta_x; + batched_quotient.add_scaled(Z_x, z_challenge); + + // TODO(#742): To complete the degree check, we need to commit to (q_{\zeta} + z*q_Z)*X^{N_max - N - 1}. + // Verification then requires a pairing check similar to the standard KZG check but with [1]_2 replaced by + // [X^{N_max - N -1}]_2. Two issues: A) we do not have an SRS with these G2 elements (so need to generate a fake + // setup until we can do the real thing), and B) its not clear to me how to update our pairing algorithms to do + // this type of pairing. For now, simply construct q_{\zeta} + z*q_Z without the shift and do a standard KZG + // pairing check. When we're ready, all we have to do to make this fully legit is commit to the shift here and + // update the pairing check accordingly. Note: When this is implemented properly, it doesnt make sense to store + // the (massive) shifted polynomial of size N_max. Ideally would only store the unshifted version and just + // compute the shifted commitment directly via a new method. 
+ auto batched_shifted_quotient = batched_quotient; + + return batched_shifted_quotient; + } +}; + +/** + * @brief Verifier for ZeroMorph multilinear PCS + * + * @tparam Curve + */ +template class ZeroMorphVerifier_ { + using Fr = typename Curve::ScalarField; + using Commitment = typename Curve::AffineElement; + + public: + /** + * @brief Compute commitment to partially evaluated batched lifted degree quotient identity + * @details Compute commitment C_{\zeta_x} = [\zeta_x]_1 using homomorphicity: + * + * C_{\zeta_x} = [q]_1 - \sum_k y^k * x^{N - d_k - 1} * [q_k]_1 + * + * @param C_q Commitment to batched lifted degree quotient + * @param C_q_k Commitments to quotients q_k + * @param y_challenge + * @param x_challenge + * @return Commitment + */ + static Commitment compute_C_zeta_x(Commitment C_q, std::vector& C_q_k, Fr y_challenge, Fr x_challenge) + { + size_t log_N = C_q_k.size(); + size_t N = 1 << log_N; + + auto result = C_q; + for (size_t k = 0; k < log_N; ++k) { + auto deg_k = static_cast((1 << k) - 1); + // Compute scalar y^k * x^{N - deg_k - 1} + auto scalar = y_challenge.pow(k); + scalar *= x_challenge.pow(N - deg_k - 1); + scalar *= Fr(-1); + + result = result + C_q_k[k] * scalar; + } + return result; + } + + /** + * @brief Compute commitment to partially evaluated ZeroMorph identity Z + * @details Compute commitment C_{Z_x} = [Z_x]_1 using homomorphicity: + * + * C_{Z_x} = x * \sum_{i=0}^{m-1}\alpha^i*[f_i] + \sum_{i=0}^{l-1}\alpha^{m+i}*[g_i] - C_v_x + * - x * \sum_k (x^{2^k}\Phi_{n-k-1}(x^{2^{k-1}}) - u_k\Phi_{n-k}(x^{2^k})) * [q_k] + * + * @param C_v_x v * x * \Phi_n(x) * [1]_1 + * @param f_commitments Commitments to unshifted polynomials [f_i] + * @param g_commitments Commitments to to-be-shifted polynomials [g_i] + * @param C_q_k Commitments to q_k + * @param alpha + * @param x_challenge + * @param u_challenge multilinear challenge + * @return Commitment + */ + static Commitment compute_C_Z_x(Commitment C_v_x, + std::vector& f_commitments, + 
std::vector& g_commitments, + std::vector& C_q_k, + Fr alpha, + Fr x_challenge, + std::vector u_challenge) + { + size_t log_N = C_q_k.size(); + size_t N = 1 << log_N; + + auto phi_numerator = x_challenge.pow(N) - 1; // x^N - 1 + // auto phi_n_x = phi_numerator / (x_challenge - 1); + + Commitment result = -C_v_x; // initialize with -C_{v,x} + auto alpha_pow = Fr(1); + // Add contribution x * \sum_{i=0}^{m-1} [f_i] + for (auto& commitment : f_commitments) { + auto scalar = x_challenge * alpha_pow; + result = result + (commitment * scalar); + alpha_pow *= alpha; + } + // Add contribution \sum_{i=0}^{l-1} [g_i] + for (auto& commitment : g_commitments) { + auto scalar = alpha_pow; + result = result + (commitment * scalar); + alpha_pow *= alpha; + } + + // Add contribution from q_k commitments + for (size_t k = 0; k < log_N; ++k) { + // Compute scalar x^{2^k} * \Phi_{n-k-1}(x^{2^{k+1}}) - u_k * \Phi_{n-k}(x^{2^k}) + auto x_pow_2k = x_challenge.pow(1 << k); // x^{2^k} + + // \Phi_{n-k-1}(x^{2^{k + 1}}) + auto phi_term_1 = phi_numerator / (x_challenge.pow(1 << (k + 1)) - 1); + + // \Phi_{n-k}(x^{2^k}) + auto phi_term_2 = phi_numerator / (x_challenge.pow(1 << k) - 1); + + auto scalar = x_pow_2k * phi_term_1; + scalar -= u_challenge[k] * phi_term_2; + scalar *= x_challenge; + scalar *= Fr(-1); + + result = result + C_q_k[k] * scalar; + } + return result; + } +}; + +} // namespace proof_system::honk::pcs::zeromorph \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.test.cpp b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.test.cpp new file mode 100644 index 00000000000..8424491ba91 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.test.cpp @@ -0,0 +1,482 @@ +#include "zeromorph.hpp" +#include "../commitment_key.test.hpp" +#include "barretenberg/honk/transcript/transcript.hpp" + +#include + +namespace proof_system::honk::pcs::zeromorph { + +template class ZeroMorphTest : 
public CommitmentTest { + public: + using Fr = typename Curve::ScalarField; + using Polynomial = barretenberg::Polynomial; + using Commitment = typename Curve::AffineElement; + using GroupElement = typename Curve::Element; + using ZeroMorphProver = ZeroMorphProver_; + using ZeroMorphVerifier = ZeroMorphVerifier_; + + // Evaluate Phi_k(x) = \sum_{i=0}^k x^i using the direct inefficent formula + Fr Phi(Fr challenge, size_t subscript) + { + size_t length = 1 << subscript; + auto result = Fr(0); + for (size_t idx = 0; idx < length; ++idx) { + result += challenge.pow(idx); + } + return result; + } + + /** + * @brief Construct and verify ZeroMorph proof of batched multilinear evaluation with shifts + * @details The goal is to construct and verify a single batched multilinear evaluation proof for m polynomials f_i + * and l polynomials h_i. It is assumed that the h_i are shifts of polynomials g_i (the "to-be-shifted" + * polynomials), which are a subset of the f_i. This is what is encountered in practice. We accomplish this using + * evaluations of h_i but commitments to only their unshifted counterparts g_i (which we get for "free" since + * commitments [g_i] are contained in the set of commitments [f_i]). 
+ * + */ + bool execute_zeromorph_protocol(size_t NUM_UNSHIFTED, size_t NUM_SHIFTED) + { + bool verified = false; + + size_t N = 16; + size_t log_N = numeric::get_msb(N); + + auto u_challenge = this->random_evaluation_point(log_N); + + // Construct some random multilinear polynomials f_i and their evaluations v_i = f_i(u) + std::vector f_polynomials; // unshifted polynomials + std::vector v_evaluations; + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + f_polynomials.emplace_back(this->random_polynomial(N)); + f_polynomials[i][0] = Fr(0); // ensure f is "shiftable" + v_evaluations.emplace_back(f_polynomials[i].evaluate_mle(u_challenge)); + } + + // Construct some "shifted" multilinear polynomials h_i as the left-shift-by-1 of f_i + std::vector g_polynomials; // to-be-shifted polynomials + std::vector h_polynomials; // shifts of the to-be-shifted polynomials + std::vector w_evaluations; + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + g_polynomials.emplace_back(f_polynomials[i]); + h_polynomials.emplace_back(g_polynomials[i].shifted()); + w_evaluations.emplace_back(h_polynomials[i].evaluate_mle(u_challenge)); + // ASSERT_EQ(w_evaluations[i], g_polynomials[i].evaluate_mle(u_challenge, /* shift = */ true)); + } + + // Compute commitments [f_i] + std::vector f_commitments; + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + f_commitments.emplace_back(this->commit(f_polynomials[i])); + } + + // Construct container of commitments of the "to-be-shifted" polynomials [g_i] (= [f_i]) + std::vector g_commitments; + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + g_commitments.emplace_back(f_commitments[i]); + } + + // Initialize an empty ProverTranscript + auto prover_transcript = ProverTranscript::init_empty(); + + // Execute Prover protocol + { + auto alpha = prover_transcript.get_challenge("ZM:alpha"); + + // Compute batching of f_i and g_i polynomials: sum_{i=0}^{m-1}\alpha^i*f_i and + // sum_{i=0}^{l-1}\alpha^{m+i}*h_i, and also batched evaluation v = sum_{i=0}^{m-1}\alpha^i*v_i + 
+ // sum_{i=0}^{l-1}\alpha^{m+i}*w_i. + auto f_batched = Polynomial(N); + auto g_batched = Polynomial(N); + auto v_evaluation = Fr(0); + auto alpha_pow = Fr(1); + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + f_batched.add_scaled(f_polynomials[i], alpha_pow); + v_evaluation += alpha_pow * v_evaluations[i]; + alpha_pow *= alpha; + } + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + g_batched.add_scaled(g_polynomials[i], alpha_pow); + v_evaluation += alpha_pow * w_evaluations[i]; + alpha_pow *= alpha; + } + + // The new f is f_batched + g_batched.shifted() = f_batched + h_batched + auto f_polynomial = f_batched; + f_polynomial += g_batched.shifted(); + + // Compute the multilinear quotients q_k = q_k(X_0, ..., X_{k-1}) + auto quotients = ZeroMorphProver::compute_multilinear_quotients(f_polynomial, u_challenge); + + // Compute and send commitments C_{q_k} = [q_k], k = 0,...,d-1 + std::vector q_k_commitments; + q_k_commitments.reserve(log_N); + for (size_t idx = 0; idx < log_N; ++idx) { + q_k_commitments[idx] = this->commit(quotients[idx]); + std::string label = "ZM:C_q_" + std::to_string(idx); + prover_transcript.send_to_verifier(label, q_k_commitments[idx]); + } + + // Get challenge y + auto y_challenge = prover_transcript.get_challenge("ZM:y"); + + // Compute the batched, lifted-degree quotient \hat{q} + auto batched_quotient = ZeroMorphProver::compute_batched_lifted_degree_quotient(quotients, y_challenge, N); + + // Compute and send the commitment C_q = [\hat{q}] + auto q_commitment = this->commit(batched_quotient); + prover_transcript.send_to_verifier("ZM:C_q", q_commitment); + + // Get challenges x and z + auto [x_challenge, z_challenge] = prover_transcript.get_challenges("ZM:x", "ZM:z"); + + // Compute degree check polynomial \zeta partially evaluated at x + auto zeta_x = ZeroMorphProver::compute_partially_evaluated_degree_check_polynomial( + batched_quotient, quotients, y_challenge, x_challenge); + + // Compute ZeroMorph identity polynomial Z partially evaluated 
at x + auto Z_x = ZeroMorphProver::compute_partially_evaluated_zeromorph_identity_polynomial( + f_batched, g_batched, quotients, v_evaluation, u_challenge, x_challenge); + + // Compute batched degree and ZM-identity quotient polynomial pi + auto pi_polynomial = ZeroMorphProver::compute_batched_evaluation_and_degree_check_quotient( + zeta_x, Z_x, x_challenge, z_challenge); + + // Compute and send proof commitment pi + auto pi_commitment = this->commit(pi_polynomial); + prover_transcript.send_to_verifier("ZM:PI", pi_commitment); + } + + auto verifier_transcript = VerifierTranscript::init_empty(prover_transcript); + + // Execute Verifier protocol + { + // Challenge alpha + auto alpha = verifier_transcript.get_challenge("ZM:alpha"); + + // Construct batched evaluation v = sum_{i=0}^{m-1}\alpha^i*v_i + sum_{i=0}^{l-1}\alpha^{m+i}*w_i + auto v_evaluation = Fr(0); + auto alpha_pow = Fr(1); + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + v_evaluation += alpha_pow * v_evaluations[i]; + alpha_pow *= alpha; + } + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + v_evaluation += alpha_pow * w_evaluations[i]; + alpha_pow *= alpha; + } + + // Receive commitments [q_k] + std::vector C_q_k; + C_q_k.reserve(log_N); + for (size_t i = 0; i < log_N; ++i) { + C_q_k.emplace_back( + verifier_transcript.template receive_from_prover("ZM:C_q_" + std::to_string(i))); + } + + // Challenge y + auto y_challenge = verifier_transcript.get_challenge("ZM:y"); + + // Receive commitment C_{q} + auto C_q = verifier_transcript.template receive_from_prover("ZM:C_q"); + + // Challenges x, z + auto [x_challenge, z_challenge] = verifier_transcript.get_challenges("ZM:x", "ZM:z"); + + // Compute commitment C_{v,x} = v * x * \Phi_n(x) * [1]_1 + auto C_v_x = Commitment::one() * v_evaluation * x_challenge * this->Phi(x_challenge, log_N); + + // Compute commitment C_{\zeta_x} + auto C_zeta_x = ZeroMorphVerifier::compute_C_zeta_x(C_q, C_q_k, y_challenge, x_challenge); + + // Compute commitment C_{Z_x} + Commitment 
C_Z_x = ZeroMorphVerifier::compute_C_Z_x( + C_v_x, f_commitments, g_commitments, C_q_k, alpha, x_challenge, u_challenge); + + // Compute commitment C_{\zeta,Z} + auto C_zeta_Z = C_zeta_x + C_Z_x * z_challenge; + + // Receive proof commitment \pi + auto C_pi = verifier_transcript.template receive_from_prover("ZM:PI"); + + // The prover and verifier manifests should agree + EXPECT_EQ(prover_transcript.get_manifest(), verifier_transcript.get_manifest()); + + // Construct inputs and perform pairing check to verify claimed evaluation + // Note: The pairing check (without the degree check component X^{N_max-N-1}) can be expressed naturally as + // e(C_{\zeta,Z}, [1]_2) = e(pi, [X - x]_2). This can be rearranged (e.g. see the plonk paper) as + // e(C_{\zeta,Z} - x*pi, [1]_2) * e(-pi, [X]_2) = 1, or + // e(P_0, [1]_2) * e(P_1, [X]_2) = 1 + auto P0 = C_zeta_Z + C_pi * x_challenge; + auto P1 = -C_pi; + verified = this->vk()->pairing_check(P0, P1); + // EXPECT_TRUE(verified); + } + return verified; + } +}; + +using CurveTypes = ::testing::Types; +TYPED_TEST_SUITE(ZeroMorphTest, CurveTypes); + +/** + * @brief Test method for computing q_k given multilinear f + * @details Given f = f(X_0, ..., X_{d-1}), and (u,v) such that f(u) = v, compute q_k = q_k(X_0, ..., X_{k-1}) such that + * the following identity holds: + * + * f(X_0, ..., X_{d-1}) - v = \sum_{k=0}^{d-1} (X_k - u_k)q_k(X_0, ..., X_{k-1}) + * + */ +TYPED_TEST(ZeroMorphTest, QuotientConstruction) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + // Define size parameters + size_t N = 16; + size_t log_N = numeric::get_msb(N); + + // Construct a random multilinear polynomial f, and (u,v) such that f(u) = v. 
+ Polynomial multilinear_f = this->random_polynomial(N); + std::vector u_challenge = this->random_evaluation_point(log_N); + Fr v_evaluation = multilinear_f.evaluate_mle(u_challenge); + + // Compute the multilinear quotients q_k = q_k(X_0, ..., X_{k-1}) + std::vector quotients = ZeroMorphProver::compute_multilinear_quotients(multilinear_f, u_challenge); + + // Show that the q_k were properly constructed by showing that the identity holds at a random multilinear challenge + // z, i.e. f(z) - v - \sum_{k=0}^{d-1} (z_k - u_k)q_k(z) = 0 + std::vector z_challenge = this->random_evaluation_point(log_N); + + Fr result = multilinear_f.evaluate_mle(z_challenge); + result -= v_evaluation; + for (size_t k = 0; k < log_N; ++k) { + auto q_k_eval = Fr(0); + if (k == 0) { + // q_0 = a_0 is a constant polynomial so it's evaluation is simply its constant coefficient + q_k_eval = quotients[k][0]; + } else { + // Construct (u_0, ..., u_{k-1}) + auto subrange_size = static_cast(k); + std::vector z_partial(z_challenge.begin(), z_challenge.begin() + subrange_size); + q_k_eval = quotients[k].evaluate_mle(z_partial); + } + // result = result - (z_k - u_k) * q_k(u_0, ..., u_{k-1}) + result -= (z_challenge[k] - u_challenge[k]) * q_k_eval; + } + + EXPECT_EQ(result, 0); +} + +/** + * @brief Test function for constructing batched lifted degree quotient \hat{q} + * + */ +TYPED_TEST(ZeroMorphTest, BatchedLiftedDegreeQuotient) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + const size_t N = 8; + + // Define some mock q_k with deg(q_k) = 2^k - 1 + std::vector data_0 = { 1 }; + std::vector data_1 = { 2, 3 }; + std::vector data_2 = { 4, 5, 6, 7 }; + Polynomial q_0(data_0); + Polynomial q_1(data_1); + Polynomial q_2(data_2); + std::vector quotients = { q_0, q_1, q_2 }; + + auto y_challenge = Fr::random_element(); + + // Compute batched quotient \hat{q} using the prover 
method + auto batched_quotient = ZeroMorphProver::compute_batched_lifted_degree_quotient(quotients, y_challenge, N); + + // Now explicitly define q_k_lifted = X^{N-2^k} * q_k and compute the expected batched result + std::array data_0_lifted = { 0, 0, 0, 0, 0, 0, 0, 1 }; + std::array data_1_lifted = { 0, 0, 0, 0, 0, 0, 2, 3 }; + std::array data_2_lifted = { 0, 0, 0, 0, 4, 5, 6, 7 }; + Polynomial q_0_lifted(data_0_lifted); + Polynomial q_1_lifted(data_1_lifted); + Polynomial q_2_lifted(data_2_lifted); + + // Explicitly compute \hat{q} + auto batched_quotient_expected = Polynomial(N); + batched_quotient_expected += q_0_lifted; + batched_quotient_expected.add_scaled(q_1_lifted, y_challenge); + batched_quotient_expected.add_scaled(q_2_lifted, y_challenge * y_challenge); + + EXPECT_EQ(batched_quotient, batched_quotient_expected); +} + +/** + * @brief Test function for constructing partially evaluated quotient \zeta_x + * + */ +TYPED_TEST(ZeroMorphTest, PartiallyEvaluatedQuotientZeta) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + const size_t N = 8; + + // Define some mock q_k with deg(q_k) = 2^k - 1 + std::vector data_0 = { 1 }; + std::vector data_1 = { 2, 3 }; + std::vector data_2 = { 4, 5, 6, 7 }; + Polynomial q_0(data_0); + Polynomial q_1(data_1); + Polynomial q_2(data_2); + std::vector quotients = { q_0, q_1, q_2 }; + + auto y_challenge = Fr::random_element(); + + auto batched_quotient = ZeroMorphProver::compute_batched_lifted_degree_quotient(quotients, y_challenge, N); + + auto x_challenge = Fr::random_element(); + + // Contruct zeta_x using the prover method + auto zeta_x = ZeroMorphProver::compute_partially_evaluated_degree_check_polynomial( + batched_quotient, quotients, y_challenge, x_challenge); + + // Now construct zeta_x explicitly + auto zeta_x_expected = Polynomial(N); + zeta_x_expected += batched_quotient; + // q_batched - 
\sum_k q_k * y^k * x^{N - deg(q_k) - 1} + zeta_x_expected.add_scaled(q_0, -x_challenge.pow(N - 0 - 1)); + zeta_x_expected.add_scaled(q_1, -y_challenge * x_challenge.pow(N - 1 - 1)); + zeta_x_expected.add_scaled(q_2, -y_challenge * y_challenge * x_challenge.pow(N - 3 - 1)); + + EXPECT_EQ(zeta_x, zeta_x_expected); +} + +/** + * @brief Demonstrate formulas for efficiently computing \Phi_k(x) = \sum_{i=0}^{k-1}x^i + * @details \Phi_k(x) = \sum_{i=0}^{k-1}x^i = (x^{2^k} - 1) / (x - 1) + * + */ +TYPED_TEST(ZeroMorphTest, PhiEvaluation) +{ + using Fr = typename TypeParam::ScalarField; + const size_t N = 8; + size_t n = numeric::get_msb(N); + + // \Phi_n(x) + { + auto x_challenge = Fr::random_element(); + + auto efficient = (x_challenge.pow(1 << n) - 1) / (x_challenge - 1); + + auto expected = this->Phi(x_challenge, n); + + EXPECT_EQ(efficient, expected); + } + + // \Phi_{n-k-1}(x^{2^{k + 1}}) = (x^{2^n} - 1) / (x^{2^{k + 1}} - 1) + { + auto x_challenge = Fr::random_element(); + + size_t k = 2; + + // x^{2^{k+1}} + auto x_pow = x_challenge.pow(1 << (k + 1)); + + auto efficient = x_challenge.pow(1 << n) - 1; // x^N - 1 + efficient = efficient / (x_pow - 1); // (x^N - 1) / (x^{2^{k + 1}} - 1) + + auto expected = this->Phi(x_pow, n - k - 1); + EXPECT_EQ(efficient, expected); + } +} + +/** + * @brief Test function for constructing partially evaluated quotient Z_x + * + */ +TYPED_TEST(ZeroMorphTest, PartiallyEvaluatedQuotientZ) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + const size_t N = 8; + size_t log_N = numeric::get_msb(N); + + // Construct a random multilinear polynomial f, and (u,v) such that f(u) = v. 
+ Polynomial multilinear_f = this->random_polynomial(N); + Polynomial multilinear_g = this->random_polynomial(N); + multilinear_g[0] = 0; + std::vector u_challenge = this->random_evaluation_point(log_N); + Fr v_evaluation = multilinear_f.evaluate_mle(u_challenge); + Fr w_evaluation = multilinear_g.evaluate_mle(u_challenge, /* shift = */ true); + + auto alpha = Fr::random_element(); + + // compute batched polynomial and evaluation + auto f_batched = multilinear_f; + auto g_batched = multilinear_g; + g_batched *= alpha; + auto v_batched = v_evaluation + alpha * w_evaluation; + + // Define some mock q_k with deg(q_k) = 2^k - 1 + auto q_0 = this->random_polynomial(1 << 0); + auto q_1 = this->random_polynomial(1 << 1); + auto q_2 = this->random_polynomial(1 << 2); + std::vector quotients = { q_0, q_1, q_2 }; + + auto x_challenge = Fr::random_element(); + + // Construct Z_x using the prover method + auto Z_x = ZeroMorphProver::compute_partially_evaluated_zeromorph_identity_polynomial( + f_batched, g_batched, quotients, v_batched, u_challenge, x_challenge); + + // Compute Z_x directly + auto Z_x_expected = g_batched; + Z_x_expected.add_scaled(f_batched, x_challenge); + Z_x_expected[0] -= v_batched * x_challenge * this->Phi(x_challenge, log_N); + for (size_t k = 0; k < log_N; ++k) { + auto x_pow_2k = x_challenge.pow(1 << k); // x^{2^k} + auto x_pow_2kp1 = x_challenge.pow(1 << (k + 1)); // x^{2^{k+1}} + // x^{2^k} * \Phi_{n-k-1}(x^{2^{k+1}}) - u_k * \Phi_{n-k}(x^{2^k}) + auto scalar = x_pow_2k * this->Phi(x_pow_2kp1, log_N - k - 1) - u_challenge[k] * this->Phi(x_pow_2k, log_N - k); + scalar *= x_challenge; + scalar *= Fr(-1); + Z_x_expected.add_scaled(quotients[k], scalar); + } + + EXPECT_EQ(Z_x, Z_x_expected); +} + +/** + * @brief Test full Prover/Verifier protocol for proving single multilinear evaluation + * + */ +TYPED_TEST(ZeroMorphTest, ProveAndVerifySingle) +{ + size_t num_unshifted = 1; + size_t num_shifted = 0; + auto verified = 
this->execute_zeromorph_protocol(num_unshifted, num_shifted); + EXPECT_TRUE(verified); +} + +/** + * @brief Test full Prover/Verifier protocol for proving batched multilinear evaluation with shifts + * + */ +TYPED_TEST(ZeroMorphTest, ProveAndVerifyBatchedWithShifts) +{ + size_t num_unshifted = 3; + size_t num_shifted = 2; + auto verified = this->execute_zeromorph_protocol(num_unshifted, num_shifted); + EXPECT_TRUE(verified); +} + +} // namespace proof_system::honk::pcs::zeromorph diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.cpp new file mode 100644 index 00000000000..d9e579cc22d --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.cpp @@ -0,0 +1,120 @@ +#include "merge_prover.hpp" + +namespace proof_system::honk { + +/** + * Create MergeProver_ + * + */ +template +MergeProver_::MergeProver_(std::shared_ptr commitment_key, std::shared_ptr op_queue) + : op_queue(op_queue) + , pcs_commitment_key(commitment_key) +{} + +/** + * @brief Prove proper construction of the aggregate Goblin ECC op queue polynomials T_i^(j), j = 1,2,3,4. + * @details Let T_i^(j) be the jth column of the aggregate op queue after incorporating the contribution from the + * present circuit. T_{i-1}^(j) corresponds to the aggregate op queue at the previous stage and $t_i^(j)$ represents + * the contribution from the present circuit only. For each j, we have the relationship T_i = T_{i-1} + right_shift(t_i, + * M_{i-1}), where the shift magnitude M_{i-1} is the length of T_{i-1}. This protocol demonstrates that the aggregate + * op queue has been constructed correctly via a simple Schwartz-Zippel check. Evaluations are proven via batched KZG. + * + * TODO(#746): Prove connection between t_i^{shift}, committed to herein, and t_i, used in the main protocol. 
See issue + * for details (https://github.com/AztecProtocol/barretenberg/issues/746). + * + * @tparam Flavor + * @return plonk::proof& + */ +template plonk::proof& MergeProver_::construct_proof() +{ + size_t N = op_queue->get_current_size(); + + // Extract T_i, T_{i-1} + auto T_current = op_queue->get_aggregate_transcript(); + auto T_prev = op_queue->get_previous_aggregate_transcript(); + // TODO(#723): Cannot currently support an empty T_{i-1}. Need to be able to properly handle zero commitment. + ASSERT(T_prev[0].size() > 0); + + // Construct t_i^{shift} as T_i - T_{i-1} + std::array t_shift; + for (size_t i = 0; i < Flavor::NUM_WIRES; ++i) { + t_shift[i] = Polynomial(T_current[i]); + t_shift[i] -= T_prev[i]; + } + + // Compute/get commitments [t_i^{shift}], [T_{i-1}], and [T_i] and add to transcript + std::array C_T_current; + for (size_t idx = 0; idx < t_shift.size(); ++idx) { + // Get previous transcript commitment [T_{i-1}] from op queue + auto C_T_prev = op_queue->ultra_ops_commitments[idx]; + // Compute commitment [t_i^{shift}] directly + auto C_t_shift = pcs_commitment_key->commit(t_shift[idx]); + // Compute updated aggregate transcript commitment as [T_i] = [T_{i-1}] + [t_i^{shift}] + C_T_current[idx] = C_T_prev + C_t_shift; + + std::string suffix = std::to_string(idx + 1); + transcript.send_to_verifier("T_PREV_" + suffix, C_T_prev); + transcript.send_to_verifier("t_SHIFT_" + suffix, C_t_shift); + transcript.send_to_verifier("T_CURRENT_" + suffix, C_T_current[idx]); + } + + // Store the commitments [T_{i}] (to be used later in subsequent iterations as [T_{i-1}]). + op_queue->set_commitment_data(C_T_current); + + // Compute evaluations T_i(\kappa), T_{i-1}(\kappa), t_i^{shift}(\kappa), add to transcript. For each polynomial + // we add a univariate opening claim {p(X), (\kappa, p(\kappa))} to the set of claims to be checked via batched KZG. + auto kappa = transcript.get_challenge("kappa"); + + // Add univariate opening claims for each polynomial. 
+ std::vector opening_claims; + // Compute evaluation T_{i-1}(\kappa) + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + auto polynomial = Polynomial(T_prev[idx]); + auto evaluation = polynomial.evaluate(kappa); + transcript.send_to_verifier("T_prev_eval_" + std::to_string(idx + 1), evaluation); + opening_claims.emplace_back(OpeningClaim{ polynomial, { kappa, evaluation } }); + } + // Compute evaluation t_i^{shift}(\kappa) + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + auto evaluation = t_shift[idx].evaluate(kappa); + transcript.send_to_verifier("t_shift_eval_" + std::to_string(idx + 1), evaluation); + opening_claims.emplace_back(OpeningClaim{ t_shift[idx], { kappa, evaluation } }); + } + // Compute evaluation T_i(\kappa) + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + auto polynomial = Polynomial(T_current[idx]); + auto evaluation = polynomial.evaluate(kappa); + transcript.send_to_verifier("T_current_eval_" + std::to_string(idx + 1), evaluation); + opening_claims.emplace_back(OpeningClaim{ polynomial, { kappa, evaluation } }); + } + + auto alpha = transcript.get_challenge("alpha"); + + // Constuct batched polynomial to opened via KZG + auto batched_polynomial = Polynomial(N); + auto batched_eval = FF(0); + auto alpha_pow = FF(1); + for (auto& claim : opening_claims) { + batched_polynomial.add_scaled(claim.polynomial, alpha_pow); + batched_eval += alpha_pow * claim.opening_pair.evaluation; + alpha_pow *= alpha; + } + + // Construct and commit to KZG quotient polynomial q = (f - v) / (X - kappa) + auto quotient = batched_polynomial; + quotient[0] -= batched_eval; + quotient.factor_roots(kappa); + + auto quotient_commitment = pcs_commitment_key->commit(quotient); + transcript.send_to_verifier("KZG:W", quotient_commitment); + + proof.proof_data = transcript.proof_data; + return proof; +} + +template class MergeProver_; +template class MergeProver_; +template class MergeProver_; + +} // namespace proof_system::honk \ No newline at end of 
file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp new file mode 100644 index 00000000000..5050de81600 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include "barretenberg/honk/flavor/goblin_ultra.hpp" +#include "barretenberg/honk/flavor/ultra.hpp" +#include "barretenberg/honk/flavor/ultra_grumpkin.hpp" +#include "barretenberg/honk/pcs/claim.hpp" +#include "barretenberg/honk/transcript/transcript.hpp" +#include "barretenberg/plonk/proof_system/types/proof.hpp" +#include "barretenberg/proof_system/op_queue/ecc_op_queue.hpp" + +namespace proof_system::honk { + +/** + * @brief Prover class for the Goblin ECC op queue transcript merge protocol + * + * @tparam Flavor + */ +template class MergeProver_ { + using FF = typename Flavor::FF; + using Polynomial = typename Flavor::Polynomial; + using CommitmentKey = typename Flavor::CommitmentKey; + using Commitment = typename Flavor::Commitment; + using PCS = typename Flavor::PCS; + using Curve = typename Flavor::Curve; + using OpeningClaim = typename pcs::ProverOpeningClaim; + using OpeningPair = typename pcs::OpeningPair; + + public: + ProverTranscript transcript; + std::shared_ptr op_queue; + std::shared_ptr pcs_commitment_key; + + explicit MergeProver_(std::shared_ptr, std::shared_ptr); + plonk::proof& construct_proof(); + + private: + plonk::proof proof; +}; + +extern template class MergeProver_; +extern template class MergeProver_; +extern template class MergeProver_; + +} // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.cpp new file mode 100644 index 00000000000..fea6b5611df --- /dev/null +++ 
b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.cpp @@ -0,0 +1,84 @@ +#include "merge_verifier.hpp" + +namespace proof_system::honk { + +template +MergeVerifier_::MergeVerifier_(std::unique_ptr verification_key) + : pcs_verification_key(std::move(verification_key)){}; + +/** + * @brief Verify proper construction of the aggregate Goblin ECC op queue polynomials T_i^(j), j = 1,2,3,4. + * @details Let T_i^(j) be the jth column of the aggregate op queue after incorporating the contribution from the + * present circuit. T_{i-1}^(j) corresponds to the aggregate op queue at the previous stage and $t_i^(j)$ represents + * the contribution from the present circuit only. For each j, we have the relationship T_i = T_{i-1} + right_shift(t_i, + * M_{i-1}), where the shift magnitude M_{i-1} is the length of T_{i-1}. This protocol verfies that the aggregate op + * queue has been constructed correctly via a simple Schwartz-Zippel check. Evaluations are checked via batched KZG. 
+ * + * @tparam Flavor + * @return plonk::proof& + */ +template bool MergeVerifier_::verify_proof(const plonk::proof& proof) +{ + transcript = VerifierTranscript{ proof.proof_data }; + + // Receive commitments [t_i^{shift}], [T_{i-1}], and [T_i] + std::array C_T_prev; + std::array C_t_shift; + std::array C_T_current; + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + C_T_prev[idx] = transcript.template receive_from_prover("T_PREV_" + std::to_string(idx + 1)); + C_t_shift[idx] = transcript.template receive_from_prover("t_SHIFT_" + std::to_string(idx + 1)); + C_T_current[idx] = transcript.template receive_from_prover("T_CURRENT_" + std::to_string(idx + 1)); + } + + FF kappa = transcript.get_challenge("kappa"); + + // Receive transcript poly evaluations and add corresponding univariate opening claims {(\kappa, p(\kappa), [p(X)]} + std::array T_prev_evals; + std::array t_shift_evals; + std::array T_current_evals; + std::vector opening_claims; + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + T_prev_evals[idx] = transcript.template receive_from_prover("T_prev_eval_" + std::to_string(idx + 1)); + opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, T_prev_evals[idx] }, C_T_prev[idx] }); + } + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + t_shift_evals[idx] = transcript.template receive_from_prover("t_shift_eval_" + std::to_string(idx + 1)); + opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, t_shift_evals[idx] }, C_t_shift[idx] }); + } + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + T_current_evals[idx] = transcript.template receive_from_prover("T_current_eval_" + std::to_string(idx + 1)); + opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, T_current_evals[idx] }, C_T_current[idx] }); + } + + // Check the identity T_i(\kappa) = T_{i-1}(\kappa) + t_i^{shift}(\kappa). 
If it fails, return false + bool identity_checked = true; + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + identity_checked = identity_checked && (T_current_evals[idx] == T_prev_evals[idx] + t_shift_evals[idx]); + } + + auto alpha = transcript.get_challenge("alpha"); + + // Constuct batched commitment and evaluation from constituents + auto batched_commitment = opening_claims[0].commitment; + auto batched_eval = opening_claims[0].opening_pair.evaluation; + auto alpha_pow = alpha; + for (size_t idx = 1; idx < opening_claims.size(); ++idx) { + auto& claim = opening_claims[idx]; + batched_commitment = batched_commitment + (claim.commitment * alpha_pow); + batched_eval += alpha_pow * claim.opening_pair.evaluation; + alpha_pow *= alpha; + } + + OpeningClaim batched_claim = { { kappa, batched_eval }, batched_commitment }; + + auto verified = PCS::verify(pcs_verification_key, batched_claim, transcript); + + return identity_checked && verified; +} + +template class MergeVerifier_; +template class MergeVerifier_; +template class MergeVerifier_; + +} // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp new file mode 100644 index 00000000000..b2b0a3d22b4 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include "barretenberg/honk/flavor/goblin_ultra.hpp" +#include "barretenberg/honk/flavor/ultra.hpp" +#include "barretenberg/honk/flavor/ultra_grumpkin.hpp" +#include "barretenberg/honk/pcs/claim.hpp" +#include "barretenberg/honk/transcript/transcript.hpp" +#include "barretenberg/plonk/proof_system/types/proof.hpp" +#include "barretenberg/proof_system/op_queue/ecc_op_queue.hpp" + +namespace proof_system::honk { + +/** + * @brief Verifier class for the Goblin ECC op queue transcript merge 
protocol + * + * @tparam Flavor + */ +template class MergeVerifier_ { + using FF = typename Flavor::FF; + using Polynomial = typename Flavor::Polynomial; + using CommitmentKey = typename Flavor::CommitmentKey; + using Commitment = typename Flavor::Commitment; + using PCS = typename Flavor::PCS; + using Curve = typename Flavor::Curve; + using OpeningClaim = typename pcs::OpeningClaim; + using VerificationKey = typename Flavor::VerificationKey; + using VerifierCommitmentKey = typename Flavor::VerifierCommitmentKey; + + public: + VerifierTranscript transcript; + std::shared_ptr op_queue; + std::shared_ptr pcs_verification_key; + + explicit MergeVerifier_(std::unique_ptr verification_key); + bool verify_proof(const plonk::proof& proof); +}; + +extern template class MergeVerifier_; +extern template class MergeVerifier_; +extern template class MergeVerifier_; + +} // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp index ad3151b45bd..618df552869 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp @@ -166,109 +166,6 @@ template void UltraProver_::execute_pcs_evaluation_ } } -/** - * @brief Prove proper construction of the aggregate Goblin ECC op queue polynomials T_i^(j), j = 1,2,3,4. - * @details Let T_i^(j) be the jth column of the aggregate op queue after incorporating the contribution from the - * present circuit. T_{i-1}^(j) corresponds to the aggregate op queue at the previous stage and $t_i^(j)$ represents - * the contribution from the present circuit only. For each j, we have the relationship T_i = T_{i-1} + right_shift(t_i, - * M_{i-1}), where the shift magnitude M_{i-1} is the length of T_{i-1}. This stage of the protocol demonstrates that - * the aggregate op queue has been constructed correctly. 
- * - */ -template void UltraProver_::execute_op_queue_transcript_aggregation_round() -{ - if constexpr (IsGoblinFlavor) { - // Extract size M_{i-1} of T_{i-1} from op_queue - size_t prev_op_queue_size = instance->proving_key->op_queue->get_previous_size(); // M_{i-1} - // TODO(#723): Cannot currently support an empty T_{i-1}. Need to be able to properly handle zero commitment. - ASSERT(prev_op_queue_size > 0); - - auto circuit_size = instance->proving_key->circuit_size; - - // TODO(#723): The below assert ensures that M_{i-1} + m_i < n, i.e. the right shifted result can be expressed - // as a size n polynomial. If this is not the case then we should still be able to proceed without increasing - // the circuit size but need to handle with care. - ASSERT(prev_op_queue_size + instance->proving_key->num_ecc_op_gates < circuit_size); // M_{i-1} + m_i < n - - // Construct right-shift of op wires t_i^{shift} so that T_i(X) = T_{i-1}(X) + t_i^{shift}(X). - // Note: The op_wire polynomials (like all others) have constant coefficient equal to zero. Thus to obtain - // t_i^{shift} we must left-shift by 1 then right-shift by M_{i-1}, or equivalently, right-shift by - // M_{i-1} - 1. - std::array right_shifted_op_wires; - auto op_wires = instance->proving_key->get_ecc_op_wires(); - for (size_t i = 0; i < op_wires.size(); ++i) { - // Right shift by M_{i-1} - 1. 
- right_shifted_op_wires[i].set_to_right_shifted(op_wires[i], prev_op_queue_size - 1); - } - - // Compute/get commitments [t_i^{shift}], [T_{i-1}], and [T_i] and add to transcript - std::array prev_aggregate_op_queue_commitments; - std::array shifted_op_wire_commitments; - std::array aggregate_op_queue_commitments; - for (size_t idx = 0; idx < right_shifted_op_wires.size(); ++idx) { - // Get previous transcript commitment [T_{i-1}] from op queue - prev_aggregate_op_queue_commitments[idx] = instance->proving_key->op_queue->ultra_ops_commitments[idx]; - // Compute commitment [t_i^{shift}] directly - shifted_op_wire_commitments[idx] = pcs_commitment_key->commit(right_shifted_op_wires[idx]); - // Compute updated aggregate transcript commitmen as [T_i] = [T_{i-1}] + [t_i^{shift}] - aggregate_op_queue_commitments[idx] = - prev_aggregate_op_queue_commitments[idx] + shifted_op_wire_commitments[idx]; - - std::string suffix = std::to_string(idx + 1); - transcript.send_to_verifier("PREV_AGG_OP_QUEUE_" + suffix, prev_aggregate_op_queue_commitments[idx]); - transcript.send_to_verifier("SHIFTED_OP_WIRE_" + suffix, shifted_op_wire_commitments[idx]); - transcript.send_to_verifier("AGG_OP_QUEUE_" + suffix, aggregate_op_queue_commitments[idx]); - } - - // Store the commitments [T_{i}] (to be used later in subsequent iterations as [T_{i-1}]). - instance->proving_key->op_queue->set_commitment_data(aggregate_op_queue_commitments); - - // Compute evaluations T_i(\kappa), T_{i-1}(\kappa), t_i^{shift}(\kappa), add to transcript. For each polynomial - // we add a univariate opening claim {(\kappa, p(\kappa)), p(X)} to the set of claims to be combined in the - // batch univariate polynomial Q in Shplonk. (The other univariate claims come from the output of Gemini). - // TODO(#729): It should be possible to reuse the opening challenge from Gemini rather than generate a new one. 
- auto kappa = transcript.get_challenge("kappa"); - auto prev_aggregate_ecc_op_transcript = instance->proving_key->op_queue->get_previous_aggregate_transcript(); - auto aggregate_ecc_op_transcript = instance->proving_key->op_queue->get_aggregate_transcript(); - std::array prev_agg_op_queue_evals; - std::array right_shifted_op_wire_evals; - std::array agg_op_queue_evals; - std::array prev_agg_op_queue_polynomials; - std::array agg_op_queue_polynomials; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - std::string suffix = std::to_string(idx + 1); - - // Compute evaluation T_{i-1}(\kappa) - prev_agg_op_queue_polynomials[idx] = Polynomial(prev_aggregate_ecc_op_transcript[idx]); - prev_agg_op_queue_evals[idx] = prev_agg_op_queue_polynomials[idx].evaluate(kappa); - transcript.send_to_verifier("prev_agg_op_queue_eval_" + suffix, prev_agg_op_queue_evals[idx]); - - // Compute evaluation t_i^{shift}(\kappa) - right_shifted_op_wire_evals[idx] = right_shifted_op_wires[idx].evaluate(kappa); - transcript.send_to_verifier("op_wire_eval_" + suffix, right_shifted_op_wire_evals[idx]); - - // Compute evaluation T_i(\kappa) - agg_op_queue_polynomials[idx] = Polynomial(aggregate_ecc_op_transcript[idx]); - agg_op_queue_evals[idx] = agg_op_queue_polynomials[idx].evaluate(kappa); - transcript.send_to_verifier("agg_op_queue_eval_" + suffix, agg_op_queue_evals[idx]); - } - - // Add univariate opening claims for each polynomial. 
- for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_openings.opening_pairs.emplace_back(OpenPair{ kappa, prev_agg_op_queue_evals[idx] }); - univariate_openings.witnesses.emplace_back(std::move(prev_agg_op_queue_polynomials[idx])); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_openings.opening_pairs.emplace_back(OpenPair{ kappa, right_shifted_op_wire_evals[idx] }); - univariate_openings.witnesses.emplace_back(std::move(right_shifted_op_wires[idx])); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_openings.opening_pairs.emplace_back(OpenPair{ kappa, agg_op_queue_evals[idx] }); - univariate_openings.witnesses.emplace_back(std::move(agg_op_queue_polynomials[idx])); - } - } -} - /** * - Do Fiat-Shamir to get "nu" challenge. * - Compute commitment [Q]_1 @@ -346,9 +243,6 @@ template plonk::proof& UltraProver_::construct_proo // Compute Fold evaluations execute_pcs_evaluation_round(); - // ECC op queue transcript aggregation - execute_op_queue_transcript_aggregation_round(); - // Fiat-Shamir: nu // Compute Shplonk batched quotient commitment Q execute_shplonk_batched_quotient_round(); diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp index 4d304709c0b..2b98a316108 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp @@ -164,55 +164,6 @@ template bool UltraVerifier_::verify_proof(const plonk batched_commitment_to_be_shifted, transcript); - // Perform ECC op queue transcript aggregation protocol - if constexpr (IsGoblinFlavor) { - // Receive commitments [t_i^{shift}], [T_{i-1}], and [T_i] - std::array prev_agg_op_queue_commitments; - std::array shifted_op_wire_commitments; - std::array agg_op_queue_commitments; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - 
prev_agg_op_queue_commitments[idx] = - transcript.template receive_from_prover("PREV_AGG_OP_QUEUE_" + std::to_string(idx + 1)); - shifted_op_wire_commitments[idx] = - transcript.template receive_from_prover("SHIFTED_OP_WIRE_" + std::to_string(idx + 1)); - agg_op_queue_commitments[idx] = - transcript.template receive_from_prover("AGG_OP_QUEUE_" + std::to_string(idx + 1)); - } - - // Receive transcript poly evaluations - FF kappa = transcript.get_challenge("kappa"); - std::array prev_agg_op_queue_evals; - std::array shifted_op_wire_evals; - std::array agg_op_queue_evals; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - prev_agg_op_queue_evals[idx] = - transcript.template receive_from_prover("prev_agg_op_queue_eval_" + std::to_string(idx + 1)); - shifted_op_wire_evals[idx] = - transcript.template receive_from_prover("op_wire_eval_" + std::to_string(idx + 1)); - agg_op_queue_evals[idx] = - transcript.template receive_from_prover("agg_op_queue_eval_" + std::to_string(idx + 1)); - - // Check the identity T_i(\kappa) = T_{i-1}(\kappa) + t_i^{shift}(\kappa). 
If it fails, return false - if (agg_op_queue_evals[idx] != prev_agg_op_queue_evals[idx] + shifted_op_wire_evals[idx]) { - return false; - } - } - - // Add corresponding univariate opening claims {(\kappa, p(\kappa), [p(X)]} - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, prev_agg_op_queue_evals[idx] }, - prev_agg_op_queue_commitments[idx] }); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back( - pcs::OpeningClaim{ { kappa, shifted_op_wire_evals[idx] }, shifted_op_wire_commitments[idx] }); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back( - pcs::OpeningClaim{ { kappa, agg_op_queue_evals[idx] }, agg_op_queue_commitments[idx] }); - } - } - // Produce a Shplonk claim: commitment [Q] - [Q_z], evaluation zero (at random challenge z) auto shplonk_claim = Shplonk::reduce_verification(pcs_verification_key, univariate_opening_claims, transcript); diff --git a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp index f8ae29e46e3..963a271201f 100644 --- a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp @@ -143,6 +143,16 @@ TEST_F(SumcheckTests, PolynomialNormalization) l_6 * full_polynomials[i][6] + l_7 * full_polynomials[i][7]; EXPECT_EQ(hand_computed_value, sumcheck.partially_evaluated_polynomials[i][0]); } + + // We can also check the correctness of the multilinear evaluations produced by Sumcheck by directly evaluating the + // full polynomials at challenge u via the evaluate_mle() function + std::vector u_challenge = { u_0, u_1, u_2 }; + for (size_t i = 0; i < NUM_POLYNOMIALS; i++) { + barretenberg::Polynomial poly(full_polynomials[i]); + auto v_expected = poly.evaluate_mle(u_challenge); + auto v_result = 
output.claimed_evaluations[i]; + EXPECT_EQ(v_expected, v_result); + } } TEST_F(SumcheckTests, Prover) diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp index 40f39d24e4e..cdf0dbeb171 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp @@ -56,9 +56,9 @@ circuit_data get_circuit_data(std::string const& name, circuit_data data; data.srs = srs; data.mock = mock; - Composer composer(srs); + Composer composer; Builder builder; - Composer mock_proof_composer(srs); + Composer mock_proof_composer; Builder mock_builder; BenchmarkInfoCollator benchmark_collator; diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp deleted file mode 100644 index fcddcbfdb94..00000000000 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp +++ /dev/null @@ -1,98 +0,0 @@ -#include -#include - -#include "../mock/mock_circuit.hpp" -#include "barretenberg/common/container.hpp" -#include "barretenberg/common/mem.hpp" -#include "barretenberg/common/streams.hpp" -#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" -#include "barretenberg/join_split_example/types.hpp" -#include "barretenberg/plonk/proof_system/proving_key/serialize.hpp" -#include "barretenberg/srs/global_crs.hpp" -#include "c_bind.h" -#include "compute_signing_data.hpp" -#include "join_split.hpp" - -using namespace barretenberg; -using namespace join_split_example::proofs::join_split; - -WASM_EXPORT void join_split__init_proving_key(bool mock) -{ - init_proving_key(barretenberg::srs::get_crs_factory(), mock); -} - -// WASM_EXPORT void join_split__init_proving_key_from_buffer(uint8_t const* pk_buf) -// { 
-// std::shared_ptr crs; -// plonk::proving_key_data pk_data; -// read(pk_buf, pk_data); -// init_proving_key(crs, std::move(pk_data)); -// } - -WASM_EXPORT void join_split__release_key() -{ - release_proving_key(); -} - -WASM_EXPORT uint32_t join_split__get_new_proving_key_data(uint8_t** output) -{ - // Computing the size of the serialized key is non trivial. We know it's ~331mb. - // Allocate a buffer large enough to hold it, and abort if we overflow. - // This is to keep memory usage down. - - auto proving_key = get_proving_key(); - auto buffer = to_buffer(*proving_key); - auto raw_buf = (uint8_t*)malloc(buffer.size()); - memcpy(raw_buf, (void*)buffer.data(), buffer.size()); - *output = raw_buf; - - return static_cast(buffer.size()); -} - -WASM_EXPORT void join_split__init_verification_key(void* /*unused*/, uint8_t const* /*unused*/) -{ - init_verification_key(barretenberg::srs::get_crs_factory()); -} - -// WASM_EXPORT void join_split__init_verification_key_from_buffer(uint8_t const* vk_buf, uint8_t const* g2x) -// { -// auto crs = std::make_shared(g2x); -// plonk::verification_key_data vk_data; -// read(vk_buf, vk_data); -// init_verification_key(crs, std::move(vk_data)); -// } - -WASM_EXPORT uint32_t join_split__get_new_verification_key_data(uint8_t** output) -{ - auto buffer = to_buffer(*get_verification_key()); - auto raw_buf = (uint8_t*)malloc(buffer.size()); - memcpy(raw_buf, (void*)buffer.data(), buffer.size()); - *output = raw_buf; - return static_cast(buffer.size()); -} - -WASM_EXPORT void join_split__compute_signing_data(uint8_t const* join_split_tx_buf, uint8_t* output) -{ - auto tx = from_buffer(join_split_tx_buf); - auto signing_data = compute_signing_data(tx); - barretenberg::fr::serialize_to_buffer(signing_data, output); -} - -WASM_EXPORT void* join_split__new_prover(uint8_t const* join_split_buf, bool mock) -{ - auto tx = from_buffer(join_split_buf); - auto prover = new_join_split_prover(tx, mock); - auto heapProver = new 
join_split_example::Prover(std::move(prover)); - return heapProver; -} - -WASM_EXPORT void join_split__delete_prover(void* prover) -{ - delete reinterpret_cast(prover); -} - -WASM_EXPORT bool join_split__verify_proof(uint8_t* proof, uint32_t length) -{ - plonk::proof pp = { std::vector(proof, proof + length) }; - return verify_proof(pp); -} diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.h b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.h deleted file mode 100644 index cd7390b1dad..00000000000 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.h +++ /dev/null @@ -1,3 +0,0 @@ -#include - -WASM_EXPORT uint32_t join_split__get_new_proving_key_data(uint8_t** output); diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp index 1da1caee45f..c4c52a1be38 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp @@ -14,8 +14,7 @@ using namespace proof_system::plonk::stdlib::merkle_tree; static std::shared_ptr proving_key; static std::shared_ptr verification_key; -void init_proving_key(std::shared_ptr> const& crs_factory, - bool mock) +void init_proving_key(bool mock) { if (proving_key) { return; @@ -27,12 +26,12 @@ void init_proving_key(std::shared_ptr> const& crs_factory) +void init_verification_key() { if (!proving_key) { std::abort(); } - // Patch the 'nothing' reference string fed to init_proving_key. 
- proving_key->reference_string = crs_factory->get_prover_crs(proving_key->circuit_size + 1); verification_key = - proof_system::plonk::compute_verification_key_common(proving_key, crs_factory->get_verifier_crs()); + proof_system::plonk::compute_verification_key_common(proving_key, srs::get_crs_factory()->get_verifier_crs()); } Prover new_join_split_prover(join_split_tx const& tx, bool mock) diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp index 1d4a1dd5fa7..a436d99f884 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp @@ -7,12 +7,11 @@ namespace join_split_example { namespace proofs { namespace join_split { -void init_proving_key(std::shared_ptr> const& crs_factory, - bool mock); +void init_proving_key(bool mock); void release_proving_key(); -void init_verification_key(std::shared_ptr> const& crs_factory); +void init_verification_key(); Prover new_join_split_prover(join_split_tx const& tx, bool mock); diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp index 8b86c4d1641..ef41bcfbf67 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp @@ -44,11 +44,10 @@ class join_split_tests : public ::testing::Test { static void SetUpTestCase() { barretenberg::srs::init_crs_factory("../srs_db/ignition"); - auto null_crs_factory = std::make_shared>(); - init_proving_key(null_crs_factory, false); + init_proving_key(false); auto crs_factory = std::make_unique>("../srs_db/ignition"); - 
init_verification_key(std::move(crs_factory)); + init_verification_key(); info("vk hash: ", get_verification_key()->sha256_hash()); } diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp index f23bceef85f..ba601d93447 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp @@ -25,11 +25,9 @@ class join_split_js_parity_tests : public ::testing::Test { protected: static void SetUpTestCase() { - auto null_crs_factory = std::make_shared>(); - init_proving_key(null_crs_factory, false); - auto crs_factory = - std::make_unique>("../srs_db/ignition"); - init_verification_key(std::move(crs_factory)); + srs::init_crs_factory("../srs_db/ignition"); + init_proving_key(false); + init_verification_key(); info("vk hash: ", get_verification_key()->sha256_hash()); } diff --git a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp index 7b0ded945c8..1026193bdf7 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp @@ -375,10 +375,11 @@ std::shared_ptr UltraComposer::compute_proving_key(CircuitBuilder& const size_t minimum_circuit_size = tables_size + lookups_size; const size_t num_randomized_gates = NUM_RESERVED_GATES; + auto crs_factory = srs::get_crs_factory(); // Initialize circuit_proving_key // TODO(#392)(Kesha): replace composer types. 
circuit_proving_key = initialize_proving_key( - circuit_constructor, crs_factory_.get(), minimum_circuit_size, num_randomized_gates, CircuitType::ULTRA); + circuit_constructor, crs_factory.get(), minimum_circuit_size, num_randomized_gates, CircuitType::ULTRA); construct_selector_polynomials(circuit_constructor, circuit_proving_key.get()); @@ -491,10 +492,12 @@ std::shared_ptr UltraComposer::compute_verification_key return circuit_verification_key; } + auto crs_factory = srs::get_crs_factory(); + if (!circuit_proving_key) { compute_proving_key(circuit_constructor); } - circuit_verification_key = compute_verification_key_common(circuit_proving_key, crs_factory_->get_verifier_crs()); + circuit_verification_key = compute_verification_key_common(circuit_proving_key, crs_factory->get_verifier_crs()); circuit_verification_key->circuit_type = CircuitType::ULTRA; diff --git a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp index fcea9028d83..0d365a6c0c4 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp @@ -27,7 +27,6 @@ class UltraComposer { std::shared_ptr circuit_verification_key; // The crs_factory holds the path to the srs and exposes methods to extract the srs elements - std::shared_ptr> crs_factory_; bool computed_witness = false; @@ -37,11 +36,7 @@ class UltraComposer { // vanishing_polynomial cannot be trivially fetched here, I am directly setting this to 4 - 1 = 3. 
static constexpr size_t s_randomness = 3; - UltraComposer() { crs_factory_ = barretenberg::srs::get_crs_factory(); } - - explicit UltraComposer(std::shared_ptr> crs_factory) - : crs_factory_(std::move(crs_factory)) - {} + UltraComposer() = default; UltraComposer(std::shared_ptr p_key, std::shared_ptr v_key) : circuit_proving_key(std::move(p_key)) diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp index c898d9d8a6f..31f5496e2bc 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp @@ -3,6 +3,7 @@ #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/common/thread_utils.hpp" +#include "barretenberg/numeric/bitop/pow.hpp" #include "polynomial_arithmetic.hpp" #include #include @@ -417,6 +418,49 @@ template Fr Polynomial::evaluate_mle(std::span evalu return result; } +template Polynomial Polynomial::partial_evaluate_mle(std::span evaluation_points) const +{ + // Get size of partial evaluation point u = (u_0,...,u_{m-1}) + const size_t m = evaluation_points.size(); + + // Assert that the size of the polynomial being evaluated is a power of 2 greater than (1 << m) + ASSERT(numeric::is_power_of_two(size_)); + ASSERT(size_ >= static_cast(1 << m)); + size_t n = numeric::get_msb(size_); + + // Partial evaluation is done in m rounds l = 0,...,m-1. At the end of round l, the polynomial has been partially + // evaluated at u_{m-l-1}, ..., u_{m-1} in variables X_{n-l-1}, ..., X_{n-1}. The size of this polynomial is n_l. 
+ size_t n_l = 1 << (n - 1); + + // Temporary buffer of half the size of the polynomial + pointer tmp_ptr = allocate_aligned_memory(sizeof(Fr) * n_l); + auto tmp = tmp_ptr.get(); + + Fr* prev = coefficients_.get(); + + // Evaluate variable X_{n-1} at u_{m-1} + Fr u_l = evaluation_points[m - 1]; + for (size_t i = 0; i < n_l; ++i) { + tmp[i] = prev[i] + u_l * (prev[i + n_l] - prev[i]); + } + // Evaluate m-1 variables X_{n-l-1}, ..., X_{n-2} at m-1 remaining values u_0,...,u_{m-2}) + for (size_t l = 1; l < m; ++l) { + n_l = 1 << (n - l - 1); + u_l = evaluation_points[m - l - 1]; + for (size_t i = 0; i < n_l; ++i) { + tmp[i] = tmp[i] + u_l * (tmp[i + n_l] - tmp[i]); + } + } + + // Construct resulting polynomial g(X_0,…,X_{n-m-1})) = p(X_0,…,X_{n-m-1},u_0,...u_{m-1}) from buffer + auto result = Polynomial(n_l); + for (size_t idx = 0; idx < n_l; ++idx) { + result[idx] = tmp[idx]; + } + + return result; +} + template typename Polynomial::pointer Polynomial::allocate_aligned_memory(const size_t size) const { return std::static_pointer_cast(get_mem_slab(size)); diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp index d22f47c5f73..b0ebaae33f6 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp @@ -202,6 +202,25 @@ template class Polynomial { */ Fr evaluate_mle(std::span evaluation_points, bool shift = false) const; + /** + * @brief Partially evaluates in the last k variables a polynomial interpreted as a multilinear extension. + * + * @details Partially evaluates p(X) = (a_0, ..., a_{2^n-1}) considered as multilinear extension p(X_0,…,X_{n-1}) = + * \sum_i a_i*L_i(X_0,…,X_{n-1}) at u = (u_0,…,u_{m-1}), m < n, in the last m variables X_n-m,…,X_{n-1}. The result + * is a multilinear polynomial in n-m variables g(X_0,…,X_{n-m-1})) = p(X_0,…,X_{n-m-1},u_0,...u_{m-1}). 
+ * + * @note Intuitively, partially evaluating in one variable collapses the hypercube in one dimension, halving the + * number of coefficients needed to represent the result. To partially evaluate starting with the first variable (as + * is done in evaluate_mle), the vector of coefficents is halved by combining adjacent rows in a pairwise + * fashion (similar to what is done in Sumcheck via "edges"). To evaluate starting from the last variable, we + * instead bisect the whole vector and combine the two halves. I.e. rather than coefficents being combined with + * their immediate neighbor, they are combined with the coefficient that lives n/2 indices away. + * + * @param evaluation_points an MLE partial evaluation point u = (u_0,…,u_{m-1}) + * @return Polynomial g(X_0,…,X_{n-m-1})) = p(X_0,…,X_{n-m-1},u_0,...u_{m-1}) + */ + Polynomial partial_evaluate_mle(std::span evaluation_points) const; + /** * @brief Divides p(X) by (X-r₁)⋯(X−rₘ) in-place. * Assumes that p(rⱼ)=0 for all j diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp index e462c1d2ca1..b0a57e135d8 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp @@ -1095,6 +1095,43 @@ TYPED_TEST(PolynomialTests, evaluate_mle) test_case(2); } +/** + * @brief Test the function for partially evaluating MLE polynomials + * + */ +TYPED_TEST(PolynomialTests, partial_evaluate_mle) +{ + // Initialize a random polynomial + using FF = TypeParam; + size_t N = 32; + Polynomial poly(N); + for (auto& coeff : poly) { + coeff = FF::random_element(); + } + + // Define a random multivariate evaluation point u = (u_0, u_1, u_2, u_3, u_4) + auto u_0 = FF::random_element(); + auto u_1 = FF::random_element(); + auto u_2 = FF::random_element(); + auto u_3 = FF::random_element(); + auto u_4 = 
FF::random_element(); + std::vector u_challenge = { u_0, u_1, u_2, u_3, u_4 }; + + // Show that directly computing v = p(u_0,...,u_4) yields the same result as first computing the partial evaluation + // in the last 3 variables g(X_0,X_1) = p(X_0,X_1,u_2,u_3,u_4), then v = g(u_0,u_1) + + // Compute v = p(u_0,...,u_4) + auto v_expected = poly.evaluate_mle(u_challenge); + + // Compute g(X_0,X_1) = p(X_0,X_1,u_2,u_3,u_4), then v = g(u_0,u_1) + std::vector u_part_1 = { u_0, u_1 }; + std::vector u_part_2 = { u_2, u_3, u_4 }; + auto partial_evaluated_poly = poly.partial_evaluate_mle(u_part_2); + auto v_result = partial_evaluated_poly.evaluate_mle(u_part_1); + + EXPECT_EQ(v_result, v_expected); +} + TYPED_TEST(PolynomialTests, factor_roots) { using FF = TypeParam; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp index 58da2674ec5..da24f823887 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp @@ -10,9 +10,6 @@ namespace proof_system { template void GoblinUltraCircuitBuilder_::finalize_circuit() { UltraCircuitBuilder_::finalize_circuit(); - - // Set internally the current and previous size of the aggregate op queue transcript - op_queue->set_size_data(); } /** diff --git a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/ultra_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/ultra_recursive_verifier.cpp index 4b97dd1c1ab..ac66bc3cf77 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/ultra_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/ultra_recursive_verifier.cpp @@ -30,7 +30,6 @@ std::array UltraRecursiveVerifier_::ve using 
VerifierCommitments = typename Flavor::VerifierCommitments; using CommitmentLabels = typename Flavor::CommitmentLabels; using RelationParams = ::proof_system::RelationParameters; - using UnivariateClaim = ::proof_system::honk::pcs::OpeningClaim; RelationParams relation_parameters; @@ -184,56 +183,6 @@ std::array UltraRecursiveVerifier_::ve ")"); prev_num_gates = builder->get_num_gates(); - // Perform ECC op queue transcript aggregation protocol - if constexpr (IsGoblinFlavor) { - // Receive commitments [t_i^{shift}], [T_{i-1}], and [T_i] - std::array prev_agg_op_queue_commitments; - std::array shifted_op_wire_commitments; - std::array agg_op_queue_commitments; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - std::string suffix = std::to_string(idx + 1); - prev_agg_op_queue_commitments[idx] = - transcript.template receive_from_prover("PREV_AGG_OP_QUEUE_" + suffix); - shifted_op_wire_commitments[idx] = - transcript.template receive_from_prover("SHIFTED_OP_WIRE_" + suffix); - agg_op_queue_commitments[idx] = - transcript.template receive_from_prover("AGG_OP_QUEUE_" + suffix); - } - - // Receive claimed evaluations of t_i^{shift}, T_{i-1}, and T_i - FF kappa = transcript.get_challenge("kappa"); - std::array prev_agg_op_queue_evals; - std::array shifted_op_wire_evals; - std::array agg_op_queue_evals; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - std::string suffix = std::to_string(idx + 1); - prev_agg_op_queue_evals[idx] = - transcript.template receive_from_prover("prev_agg_op_queue_eval_" + suffix); - shifted_op_wire_evals[idx] = transcript.template receive_from_prover("op_wire_eval_" + suffix); - agg_op_queue_evals[idx] = transcript.template receive_from_prover("agg_op_queue_eval_" + suffix); - - ASSERT(agg_op_queue_evals[idx].get_value() == - prev_agg_op_queue_evals[idx].get_value() + shifted_op_wire_evals[idx].get_value()); - - // Check the identity T_i(\kappa) = T_{i-1}(\kappa) + t_i^{shift}(\kappa). 
- agg_op_queue_evals[idx].assert_equal(prev_agg_op_queue_evals[idx] + shifted_op_wire_evals[idx]); - } - - // Add corresponding univariate opening claims {(\kappa, p(\kappa), [p(X)]} - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back( - UnivariateClaim{ { kappa, prev_agg_op_queue_evals[idx] }, prev_agg_op_queue_commitments[idx] }); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back( - UnivariateClaim{ { kappa, shifted_op_wire_evals[idx] }, shifted_op_wire_commitments[idx] }); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back( - UnivariateClaim{ { kappa, agg_op_queue_evals[idx] }, agg_op_queue_commitments[idx] }); - } - } - // Produce a Shplonk claim: commitment [Q] - [Q_z], evaluation zero (at random challenge z) auto shplonk_claim = Shplonk::reduce_verification(pcs_verification_key, univariate_opening_claims, transcript); diff --git a/barretenberg/ts/CHANGELOG.md b/barretenberg/ts/CHANGELOG.md index e5b4df84298..30f69c98690 100644 --- a/barretenberg/ts/CHANGELOG.md +++ b/barretenberg/ts/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.8.9](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg.js-v0.8.8...barretenberg.js-v0.8.9) (2023-10-10) + + +### Miscellaneous + +* **barretenberg.js:** Synchronize aztec-packages versions + ## [0.8.8](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg.js-v0.8.7...barretenberg.js-v0.8.8) (2023-10-09) diff --git a/barretenberg/ts/package.json b/barretenberg/ts/package.json index 2d479626552..f7b2456e90c 100644 --- a/barretenberg/ts/package.json +++ b/barretenberg/ts/package.json @@ -1,6 +1,6 @@ { "name": "@aztec/bb.js", - "version": "0.8.8", + "version": "0.8.9", "homepage": "https://github.com/AztecProtocol/aztec-packages/tree/master/barretenberg/ts", "license": "MIT", "type": "module", diff --git a/barretenberg/ts/src/index.ts 
b/barretenberg/ts/src/index.ts index 973998ba4c4..0998072f1de 100644 --- a/barretenberg/ts/src/index.ts +++ b/barretenberg/ts/src/index.ts @@ -1,3 +1,3 @@ export { Crs } from './crs/index.js'; export { Barretenberg } from './barretenberg/index.js'; -export { RawBuffer } from './types/index.js'; +export { RawBuffer, Fr } from './types/index.js'; diff --git a/build-system/.gitrepo b/build-system/.gitrepo index 68330138711..5c9627c47aa 100644 --- a/build-system/.gitrepo +++ b/build-system/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/build-system branch = master - commit = 31a7d280abf01ffc86de9480536a191a8c3f6602 - parent = 171a5508b262ed60397da90bf4092a6122b8bbed + commit = 2818c655048bc9ce18b36eca693dd0b467724eaf + parent = b8199802bad3c05ebe4d1ded5338a09a04e0ed7e method = merge cmdver = 0.4.6 diff --git a/docs/.gitrepo b/docs/.gitrepo index 4ed0eea54de..486b0945adc 100644 --- a/docs/.gitrepo +++ b/docs/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/docs branch = main - commit = 831393342a31e570c60e6b01e95265decf030746 - parent = 171a5508b262ed60397da90bf4092a6122b8bbed + commit = e8c36f82df8e9e048a180c801ae7bcf08058533d + parent = b8199802bad3c05ebe4d1ded5338a09a04e0ed7e method = merge cmdver = 0.4.6 diff --git a/docs/docs/about_aztec/roadmap/features_initial_ldt.md b/docs/docs/about_aztec/roadmap/features_initial_ldt.md index 34b86912cba..7b7f96d40d8 100644 --- a/docs/docs/about_aztec/roadmap/features_initial_ldt.md +++ b/docs/docs/about_aztec/roadmap/features_initial_ldt.md @@ -52,7 +52,7 @@ A typescript wrapper for making RPC calls to an Aztec Sandbox node. See the sour - Similar in purpose to `web3.js`/`ethers.js`/`viem`, but for interacting with Aztec Network nodes. The RPC interface for an Aztec node is necessarily different from that of an Ethereum node, because it deals with encrypted transactions and state variables. - A library for public/private key management. 
-- Construct `Contract` instances from a Aztec.nr contract's JSON ABI. +- Construct `Contract` instances from a Aztec.nr contract's JSON artifact. - Deploy new contracts to the Aztec Sandbox. - Construct tx requests, passing arguments to a function of a contract. - Sign tx requests. diff --git a/docs/docs/concepts/advanced/acir_simulator.md b/docs/docs/concepts/advanced/acir_simulator.md new file mode 100644 index 00000000000..bae95f54ff1 --- /dev/null +++ b/docs/docs/concepts/advanced/acir_simulator.md @@ -0,0 +1,35 @@ +--- +title: ACIR Simulator +--- + +The ACIR Simulator is responsible for simulation Aztec smart contract function execution. This component helps with correct execution of Aztec transactions. + +Simulating a function implies generating the partial witness and the public inputs of the function, as well as collecting all the data (such as created notes or nullifiers, or state changes) that are necessary for components upstream. + +## Simulating functions + +It simulates three types of functions: + +### Private Functions + +Private functions are simulated and proved client-side, and verified client-side in the private kernel circuit. + +They are run with the assistance of a DB oracle that provides any private data requested by the function. You can read more about oracle functions in the smart contract section [here](../../dev_docs/contracts/syntax/functions.md#oracle-functions). + +Private functions can call other private functions and can request to call a public function. The public function execution will be performed by the sequencer asynchronously, so private functions don't have direct access to the return values of public functions. + +### Public Functions + +Public functions are simulated and proved on the [sequencer](../foundation/nodes_clients/sequencer.md) side, and verified by the [public kernel circuit](./circuits/kernels/public_kernel.md). 
+ +They are run with the assistance of an oracle that provides any value read from the public state tree. + +Public functions can call other public functions as well as private functions. Public function composability can happen atomically, but public to private function calls happen asynchronously (the private function call must happen in a future block). + +### Unconstrained Functions + +Unconstrained functions are used to extract useful data for users, such as the user balance. They are not proved, and are simulated client-side. + +They are run with the assistance of a DB oracle that provides any private data requested by the function. + +At the moment, unconstrained functions cannot call any other function. It is not possible for them to call constrained functions, but it is on the roadmap to allow them to call other unconstrained functions. diff --git a/docs/docs/concepts/advanced/contract_creation.md b/docs/docs/concepts/advanced/contract_creation.md index 9e054f5eacb..882e57f0743 100644 --- a/docs/docs/concepts/advanced/contract_creation.md +++ b/docs/docs/concepts/advanced/contract_creation.md @@ -40,7 +40,7 @@ Each contract's function data is stored in a Merkle tree, where each leaf contai ### How are function signatures defined? -We can take a leaf from Ethereum and make them the first 4 bytes of a hash of the function definition (defined according to the contract ABI, TBD). +We can take a leaf from Ethereum and make them the first 4 bytes of a hash of the function definition (defined according to the contract artifact, TBD). 
## Contract Representation in Aztec @@ -284,7 +284,7 @@ The set of functions of a contract is represented as a mini Merkle tree of verif - Distributing L2 contract data - Linking to an L1 Portal Contract -These topics are reflected in the layout of the contract deployment ABI: +These topics are reflected in the layout of the contract deployment artifact: ```js publicInputs = { diff --git a/docs/docs/concepts/advanced/sequencer_selection.md b/docs/docs/concepts/advanced/sequencer_selection.md new file mode 100644 index 00000000000..6f1bdffc410 --- /dev/null +++ b/docs/docs/concepts/advanced/sequencer_selection.md @@ -0,0 +1,100 @@ +--- +title: Sequencer Selection +--- + +## Fernet + +_A protocol for random sequencer selection for the Aztec Network. Prior versions:_ + +- [Fernet 52 (Aug 2023)](https://hackmd.io/0cI_xVsaSVi7PToCJ9A2Ew?view) +- [Sequencer Selection: Fernet (Jun 2023)](https://hackmd.io/0FwyoEjKSUiHQsmowXnJPw?both) +- [Sequencer Selection: Fernet (Jun 2023, Forum)](https://discourse.aztec.network/t/proposal-sequencer-selection-fernet/533) + +## Introduction + +_Fair Election Randomized Natively on Ethereum Trustlessly_ (**Fernet**) is a protocol for random _sequencer_ selection. In each iteration, it relies on a VRF to assign a random score to each sequencer in order to rank them. The sequencer with the highest score can propose an ordering for transactions and the block they build upon, and then reveal its contents for the chain to advance under soft finality. _Provers_ must then assemble a proof for this block and submit it to L1 for the block to be finalised. + +## Staking + +Sequencers are required to stake on L1 in order to participate in the protocol. Each sequencer registers a public key when they stake, which will be used to verify their VRF submission. After staking, a sequencer needs to wait for an activation period of N L1 blocks until they can start proposing new blocks. 
Unstaking also requires a delay to allow for slashing of dishonest behaviour. + +## Randomness + +We use a verifiable random function to rank each sequencer. We propose a SNARK of a hash over the sequencer private key and a public input, borrowing [this proposal from the Espresso team](https://discourse.aztec.network/t/proposal-sequencer-selection-irish-coffee/483#vrf-specification-4). The public input is the current block number and a random beacon value from RANDAO. The value sourced from RANDAO should be old enough to prevent L1 reorgs from affecting sequencer selection on L2. This approach allows each individual proposer to secretly calculate the likelihood of being elected for a given block with enough anticipation. + +Alternatively, we can compute the VRF over the _public_ key of each sequencer. This opens the door to DoS attacks, since the leader for each block becomes public in advance, but it also provides clarity to all sequencers as to who the expected leader is, and facilitates off-protocol PBS. + +## Protocol phases + +Each block goes through three main phases in L1: proposal, reveal, and proving. Transactions can achieve soft finality at the end of the reveal phase. + +![](https://hackmd.io/_uploads/SyReMn1An.png) + +### Proposal phase + +During the initial proposal phase, proposers submit to L1 a **block commitment**, which includes a commitment to the transaction ordering in the proposed block, the previous block being built upon, and any additional metadata required by the protocol. + +**Block commitment contents:** + +- Hash of the ordered list of transaction identifiers for the block (with an optional salt). +- Identifier of the previous block in the chain. +- The output of the VRF for this sequencer. + +At the end of the proposal phase, the sequencer with the highest score submitted becomes the leader for this cycle, and has exclusive rights to deciding the contents of the block. 
Note that this plays nicely with private mempools, since having exclusive rights allows the leader to disclose private transaction data in the reveal phase. + +> _In the original version of Fernet, multiple competing proposals could enter the proving phase. Read more about the rationale for this change [here](https://hackmd.io/0cI_xVsaSVi7PToCJ9A2Ew?both#Mitigation-Elect-single-leader-after-proposal-phase)._ + +### Reveal phase + +The sequencer with the highest score in the proposal phase must then upload the block contents to either L1 or a verifiable DA layer. This guarantees that the next sequencer will have all data available to start building the next block, and clients will have the updated state to create new txs upon. It should be safe to assume that, in the happy path, this block would be proven and become final, so this provides early soft finality to transactions in the L2. + +> _This phase is a recent addition and a detour from the original version of Fernet. Read more about the rationale for this addition [here](https://hackmd.io/0cI_xVsaSVi7PToCJ9A2Ew?both#Mitigation-Block-reveal-phase)._ + +Should the leading sequencer fail to reveal the block contents, we flag that block as skipped, and the next sequencer is expected to build from the previous one. We could consider this to be a slashing condition for the sequencer. + +![](https://hackmd.io/_uploads/B1njcnJCn.png) + +### Proving phase + +During this phase, provers work to assemble an aggregated proof of the winning block. Before the end of this phase, it is expected for the block proof to be published to L1 for verification. + +> Prover selection is still being worked on and out of scope of this sequencer selection protocol. + +Once the proof for the winning block is submitted to L1, the block becomes final, assuming its parent block in the chain is also final. This triggers payouts to sequencer and provers (if applicable depending on the proving network design). 
+ +**Canonical block selection:** + +- Has been proven during the proving phase. +- Its contents have been submitted to the DA layer in the reveal phase. +- It had the highest score on the proposal phase. +- Its referenced previous block is also canonical. + +## Next block + +The cycle for block N+1 (ie from the start of the proposal phase until the end of the proving phase) can start at the end of block N reveal phase, where the network has all data available on L1 or a DA to construct the next block. + +![](https://hackmd.io/_uploads/SJbPKJe0n.png) + +The only way to trigger an L2 reorg (without an L1 one) is if block N is revealed but doesn't get proven. In this case, all subsequent blocks become invalidated and the chain needs to restart from block N-1. + +![](https://hackmd.io/_uploads/HkMDHxxC2.png) + +To mitigate the effect of wasted effort by all sequencers from block N+1 until the reorg, we could implement uncle rewards for these sequencers. And if we are comfortable with slashing, take those rewards out of the pocket of the sequencer that failed to finalise their block. + +## Batching + +> _Read more approaches to batching [here](https://hackmd.io/0cI_xVsaSVi7PToCJ9A2Ew?both#Batching)._ + +As an extension to the protocol, we can bake in batching of multiple blocks. Rather than creating one proof per block, we can aggregate multiple blocks into a single proof, in order to amortise the cost of verifying the root rollup ZKP on L1, thus reducing fees. + +The tradeoff in batching is delayed finalisation: if we are not posting proofs to L1 for every block, then the network needs to wait until the batch proof is submitted for finalisation. This can also lead to deeper L2 reorgs. + +In a batching model, proving for each block happens immediately as the block is revealed, same as usual. But the resulting proof is not submitted to L1: instead, it is aggregated into the proof of the next block. 
+ +![](https://hackmd.io/_uploads/H1Y61ABJT.png) + +Here all individual block proofs are valid as candidates to finalise the current batch. This opens the door to dynamic batch sizes, so the proof could be verified on L1 when it's economically convenient. + +## Resources + +- [Excalidraw diagrams](https://excalidraw.com/#json=DZcYDUKVImApNjj17KhAf,fMbieqJpOysX9obVitUDEA) diff --git a/docs/docs/concepts/foundation/accounts/authwit.md b/docs/docs/concepts/foundation/accounts/authwit.md new file mode 100644 index 00000000000..bb0e5ca367f --- /dev/null +++ b/docs/docs/concepts/foundation/accounts/authwit.md @@ -0,0 +1,199 @@ +--- +title: Authentication Witness +--- + +Authentication Witness is a scheme for authenticating actions on Aztec, so users can allow third-parties (eg protocols or other users) to execute an action on their behalf. + +## Background + +When building DeFi or other smart contracts, it is often desired to interact with other contracts to execute some action on behalf of the user. For example, when you want to deposit funds into a lending protocol, the protocol wants to perform a transfer of [ERC20](https://eips.ethereum.org/EIPS/eip-20) tokens from the user's account to the protocol's account. + +In the EVM world, this is often accomplished by having the user `approve` the protocol to transfer funds from their account, and then calling a `deposit` function on it afterwards. + +```mermaid +sequenceDiagram + actor Alice + Alice->>Token: approve(Defi, 1000); + Alice->>Defi: deposit(Token, 1000); + activate Defi + Defi->>Token: transferFrom(Alice, Defi, 1000); + deactivate Defi +``` + +This flow makes it rather simple for the application developer to implement the deposit function, but does not come without its downsides. + +One main downside, which births a bunch of other issues, is that the user needs to send two transactions to make the deposit - first the `approve` and then the `deposit`. 
+ +To limit the annoyance for return-users, some front-ends will use the `approve` function with an infinite amount, which means that the user will only have to sign the `approve` transaction once, and every future `deposit` will then use some of that "allowance" to transfer funds from the user's account to the protocol's account. + +This can lead to a series of issues though, eg: + +- The user is not aware of how much they have allowed the protocol to transfer. +- The protocol can transfer funds from the user's account at any time. This means that if the protocol is rugged or exploited, it can transfer funds from the user's account without the user having to sign any transaction. This is especially an issue if the protocol is upgradable, as it could be made to steal the user's approved funds at any time in the future. + +To avoid this, many protocols implement the `permit` flow, which uses a meta-transaction to let the user sign the approval off-chain, and pass it as an input to the `deposit` function, that way the user only has to send one transaction to make the deposit. + +```mermaid +sequenceDiagram + actor Alice + Alice->>Alice: sign permit(Defi, 1000); + Alice->>Defi: deposit(Token, 1000, signature); + activate Defi + Defi->>Token: permit(Alice, Defi, 1000, signature); + Defi->>Token: transferFrom(Alice, Defi, 1000); + deactivate Defi +``` + +This is a great improvement to infinite approvals, but still has its own set of issues. For example, if the user is using a smart-contract wallet (such as Argent or Gnosis Safe), they will not be able to sign the permit message since the usual signature validation does not work well with contracts. [EIP-1271](https://eips.ethereum.org/EIPS/eip-1271) was proposed to give contracts a way to emulate this, but it is not widely adopted. + +Separately, the message that the user signs can seem opaque to the user and they might not understand what they are signing. This is generally an issue with `approve` as well. 
+ +All of these issues have been discussed in the community for a while, and there are many proposals to solve them. However, none of them have been widely adopted - ERC20 is so commonly used and changing a standard is hard. + +## In Aztec + +Adopting ERC20 for Aztec is not as simple as it might seem because of private state. + +If you recall from [State model](./../state_model.md), private state is generally only known by its owner and those they have shared it with. Because it relies on secrets, private state might be "owned" by a contract, but it needs someone with knowledge of these secrets to actually spend it. You might see where this is going. + +If we were to implement the `approve` with an allowance in private, you might know the allowance, but unless you also know about the individual notes that make up the user's balances, it would be of no use to you! It is private after all. To spend the user's funds you would need to know the decryption key, see [keys for more](../accounts/keys.md). + +While this might sound limiting in what we can actually do, the main use of approvals has been for simplifying contract interactions that the user is doing. In the case of private transactions, this is executed on the user device, so it is not a blocker that the user needs to tell the executor a secret - the user is the executor! +### So what can we do? + +A few more things we need to remember about private execution: + +- To stay private, it all happens on the user device. +- Because it happens on the user device, additional user-provided information can be passed to the contract mid-execution via an oracle call. + +For example, when executing a private transfer, the wallet will be providing the notes that the user wants to transfer through one of these oracle calls instead of the function arguments. This allows us to keep the function signature simple, and have the user provide the notes they want to transfer through the oracle call. 
+ +For a transfer, it could be the notes provided, but we could also use the oracle to provide any type of data to the contract. So we can borrow the idea from `permit` that the user can provide a signature (or witness) to the contract which allows it to perform some action on behalf of the user. + +:::info Witness or signature? +The doc refers to a witness instead of a signature because it is not necessarily a signature that is required to convince the account contract that we are allowed to perform the action. It depends on the contract implementation, and could also be a password or something similar. +::: + +Since the witness is used to authenticate that someone can execute an action on behalf of the user, we call it an Authentication Witness or `AuthWit` for short. An "action", in this meaning, is a blob of data that specifies what call is approved, what arguments it is approved with, and the actor that is authenticated to perform the call. + +In practice, this blob is currently outlined to be a hash of the content mentioned, but it might change over time to make ["simulating simulations"](https://discourse.aztec.network/t/simulating-simulations/2218) easier. + +Outlined more clearly, we have the following, where the `H` is a SNARK-friendly hash function and `argsHash` is the hash of function arguments: + +```rust +authentication_witness_action = H( + caller: AztecAddress, + contract: AztecAddress, + selector: Field, + argsHash: Field +); +``` + +To outline an example as mentioned earlier, let's say that we have a token that implements `AuthWit` such that transfer funds from A to B is valid if A is doing the transfer, or there is a witness that authenticates the caller to transfer funds from A's account. While this specifies the spending rules, one must also know of the notes to use them for anything. This means that a witness in itself is only half the information. 
+ +Creating the authentication action for the transfer of funds to the Defi contract would look like this: + +```rust +action = H(defi, token, transfer_selector, H(alice_account, defi, 1000)); +``` + +This can be read as "defi is allowed to call token transfer function with the arguments (alice_account, defi, 1000)". + +With this out of the way, let's look at how this would work in the graph below. The exact contents of the witness will differ between implementations as mentioned before, but for the sake of simplicity you can think of it as a signature, which the account contract can then use to validate if it really should allow the action. + +```mermaid +sequenceDiagram + actor Alice + participant AC as Alice Account + participant Token + Alice->>AC: Defi.deposit(Token, 1000); + activate AC + AC->>Defi: deposit(Token, 1000); + activate Defi + Defi->>Token: transfer(Alice, Defi, 1000); + activate Token + Token->>AC: Check if Defi may call transfer(Alice, Defi, 1000); + AC-->>Alice: Please give me AuthWit for DeFi
calling transfer(Alice, Defi, 1000); + activate Alice + Alice-->>Alice: Produces Authentication witness + Alice-->>AC: AuthWit for transfer(Alice, Defi, 1000); + AC->>Token: AuthWit validity + deactivate Alice + Token->>Token: throw if invalid AuthWit + Token->>Token: transfer(Alice, Defi, 1000); + Token->>Defi: success + deactivate Token + Defi->>Defi: deposit(Token, 1000); + deactivate Defi + deactivate AC +``` + +:::info Static call for AuthWit checks +The call to the account contract for checking authentication should be a static call, meaning that it cannot change state or make calls that change state. If this call is not static, it could be used to re-enter the flow and change the state of the contract. +::: + +:::danger Static call currently unsupported +The current execution layer does not implement static call. So currently you will be passing along the control flow :grimacing:. +::: + +:::danger Re-entries +The above flow could be re-entered at token transfer. It is mainly for show to illustrate a logic outline. +::: + +### What about public + +As noted earlier, we could use the ERC20 standard for public. But this seems like a waste when we have the ability to try righting some wrongs. Instead, we can expand our AuthWit scheme to also work in public. This is actually quite simple, instead of asking an oracle (which we can't do as easily because not private execution) we can just store the AuthWit in the account contract, and look it up when we need it. While this needs the storage to be updated ahead of time, we can quite easily do so by batching the AuthWit updates with the interaction - a benefit of Account Contracts. 
+ +```mermaid +sequenceDiagram + actor Alice + participant AC as Alice Account + participant Token + rect rgb(191, 223, 255) + note right of Alice: Alice sends a batch + Alice->>AC: Allow Defi to call transfer(Alice, Defi, 1000); + activate AC + Alice->>AC: Defi.deposit(Token, 1000); + end + AC->>Defi: deposit(Token, 1000); + activate Defi + Defi->>Token: transfer(Alice, Defi, 1000); + activate Token + Token->>AC: Check if Defi may call transfer(Alice, Defi, 1000); + AC->>Token: AuthWit validity + Token->>Token: throw if invalid AuthWit + Token->>Token: transfer(Alice, Defi, 1000); + Token->>Defi: success + deactivate Token + Defi->>Defi: deposit(Token, 1000); + deactivate Defi + deactivate AC +``` + +### Replays + +To ensure that the authentication witness can only be used once, we can emit the action itself as a nullifier. This way, the authentication witness can only be used once. This is similar to how notes are used, and we can use the same nullifier scheme for this. + +Note however, that it means that the same action cannot be authenticated twice, so if you want to allow the same action to be authenticated multiple times, we should include a nonce in the arguments, such that the action is different each time. + +For the transfer, this could be done simply by appending a nonce to the arguments. + +```rust +action = H(defi, token, transfer_selector, H(alice_account, defi, 1000, nonce)); +``` + +Beware that the account contract will be unable to emit the nullifier since it is checked with a static call, so the calling contract must do it. This is similar to nonces in ERC20 tokens today. We provide a small library that handles this which we will see in the [developer documentation](./../../../dev_docs/contracts/resources/common_patterns/authwit.md). + +### Differences to approval + +The main difference is that we are not setting up an allowance, but allowing the execution of a specific action. 
We decided on this option as the default since it is more explicit and the user can agree exactly what they are signing. + +Also, most uses of the approvals are for contracts where the following interactions are called by the user themselves, so it is not a big issue that they are not as easily "transferrable" as the `permit`s. + +### Other use-cases + +We don't need to limit ourselves to the `transfer` function, we can use the same scheme for any function that requires authentication. For example, for authenticating to burn or shield assets or to vote in a governance contract or perform an operation on a lending protocol. + +### Next Steps + +Check out the [developer documentation](./../../../dev_docs/contracts/resources/common_patterns/authwit.md) to see how to implement this in your own contracts. \ No newline at end of file diff --git a/docs/docs/concepts/foundation/contracts.md b/docs/docs/concepts/foundation/contracts.md index 03435fcf9eb..49321bcb9ed 100644 --- a/docs/docs/concepts/foundation/contracts.md +++ b/docs/docs/concepts/foundation/contracts.md @@ -2,10 +2,6 @@ title: Smart Contracts --- -import Disclaimer from '../../misc/common/\_disclaimer.mdx'; - - - A "smart contract" is defined as a set of public and private functions written as Noir circuits. These functions operate on public and private state stored by a contract. Each function is represented as a ZK SNARK verification key, where the contract is uniquely described by the set of its verification keys, and stored in the Aztec Contracts tree. [Noir](https://noir-lang.org) is a programming language designed for converting high-level programs into ZK circuits. Based on Rust, the goal is to present an idiomatic way of writing private smart contracts that is familiar to Ethereum developers. Noir is under active development adding features such as contracts, functions and storage variables. 
@@ -14,27 +10,6 @@ The end goal is a language that is intuitive to use for developers with no crypt There are no plans for EVM compatibility or to support Solidity in Aztec. The privacy-first nature of Aztec is fundamentally incompatible with the EVM architecture and Solidity's semantics. In addition, the heavy use of client-side proof construction makes this impractical. -## Enabling Transaction Semantics: The Aztec Kernel - -There are two kernel circuits in Aztec, the private kernel and the public kernel. Each circuit validates the correct execution of a particular function call. - -A transaction is built up by generating proofs for multiple recursive iterations of kernel circuits. Each call in the call stack is modelled as new iteration of the kernel circuit and are managed by a [FIFO]() queue containing pending function calls. There are two call stacks, one for private calls and one for public calls. - -One iteration of a kernel circuit will pop a call off of the stack and execute the call. If the call triggers subsequent contract calls, these are pushed onto the stack. - -Private kernel proofs are generated first. The transaction is ready to move to the next phase when the private call stack is empty. - -The public kernel circuit takes in proof of a public/private kernel circuit with an empty private call stack, and operates recursively until the public call stack is also empty. - -A transaction is considered complete when both call stacks are empty. - -The only information leaked about the transaction is: - -1. The number of private state updates triggered -2. The set of public calls generated - -The addresses of all private calls are hidden from observers. - ## Further reading Read more about writing Aztec contracts [here](../../dev_docs/contracts/main.md). 
diff --git a/docs/docs/concepts/foundation/main.md b/docs/docs/concepts/foundation/main.md index 2f3509e5e79..80427ba3eba 100644 --- a/docs/docs/concepts/foundation/main.md +++ b/docs/docs/concepts/foundation/main.md @@ -1,5 +1,48 @@ -import DocCardList from '@theme/DocCardList'; +--- +title: Foundational Concepts +--- -# Foundational Concepts +As a layer 2 rollup on Ethereum, the Aztec network includes components that look similar to other layer 2 networks, but since it handles private state it also includes many new components. - +On this page we will introduce the high level network architecture for Aztec with an emphasis on the concepts that are core to understanding Aztec, including: + +- [The state model](./state_model.md) +- [Accounts](./accounts/main.md) +- [Aztec Smart Contracts](./contracts.md) +- [Transactions](./transactions.md) +- [Communication between network components](./communication/main.md) + +## High level network architecture + +An overview of the Aztec network architecture will help contextualize the concepts introduced in this section. + +network architecture + +### Aztec.js + +A user of the Aztec network will interact with the network through Aztec.js. Aztec.js is a library that provides APIs for managing accounts and interacting with smart contracts (including account contracts) on the Aztec network. It communicates with the [Private eXecution Environment (PXE)](../../apis/pxe/interfaces/PXE) through a `PXE` implementation, allowing developers to easily register new accounts, deploy contracts, view functions, and send transactions. + +### Private Execution Environment + +The PXE provides a secure environment for the execution of sensitive operations, ensuring private information and decrypted data are not accessible to unauthorized applications. 
It hides the details of the [state model](./state_model.md) from end users, but the state model is important for Aztec developers to understand as it has implications for [private/public execution](./communication/public_private_calls.md) and [L1/L2 communication](./communication/cross_chain_calls.md). The PXE also includes the [ACIR Simulator](../advanced/acir_simulator.md) for private executions and the KeyStore for secure key management. + +Procedurally, the PXE sends results of private function execution and requests for public function executions to the [sequencer](./nodes_clients/sequencer.md), which will update the state of the rollup. + +### Sequencer + +The sequencer aggregates transactions into a block, generates proofs of the state updates (or delegates proof generation to the prover network) and posts it to the rollup contract on Ethereum, along with any required public data for data availability. + +## Further Reading + +Here are links to pages with more information about the network components mentioned above: + +- Aztec.js + - [Dapp tutorial](../../dev_docs/tutorials/writing_dapp/main.md) + - [API reference](../../apis/aztec-js) +- Private Execution Environment (PXE) + - [Dapp tutorial](../../dev_docs/tutorials/writing_dapp/pxe_service.md) + - [API reference](../../apis/pxe/index.md) +- [Private Kernel Circuit](../advanced/circuits/kernels/private_kernel.md) +- [Sequencer](./nodes_clients/sequencer.md) +- Prover Network (coming soontm) +- [Rollup Circuit](../advanced/circuits/rollup_circuits/main.md) -- a component of the rollup contract diff --git a/docs/docs/concepts/foundation/nodes_clients/sequencer.md b/docs/docs/concepts/foundation/nodes_clients/sequencer.md new file mode 100644 index 00000000000..e9a20081223 --- /dev/null +++ b/docs/docs/concepts/foundation/nodes_clients/sequencer.md @@ -0,0 +1,43 @@ +--- +title: Sequencer +--- + +The sequencer is a module responsible for creating and publishing new rollup blocks. 
This involves fetching txs from the P2P pool, ordering them, executing any public functions, running them through the rollup circuits, assembling the L2 block, and posting it to the L1 rollup contract along with any contract deployment public data. + +On every new block assembled, it modifies the world state database to reflect the txs processed, but these changes are only committed once the world state synchronizer sees the new block on L1. + +## Components + +The **block builder** is responsible for assembling an L2 block out of a set of processed transactions (we say a tx has been processed if all its function calls have been executed). This involves running the txs through the base, merge, and rollup circuits, updating the world state trees, and building the L2 block object. + +The **prover** generates proofs for every circuit used. For the time being, no proofs are being actually generated, so the only implementation is an empty one. + +The **publisher** deals with sending L1 transactions to the rollup and contract deployment emitter contracts. It is responsible for assembling the Ethereum tx, choosing reasonable gas settings, and monitoring the tx until it gets mined. Note that the current implementation does not handle unstable network conditions (gas price spikes, reorgs, etc). + +The **public processor** executes any public function calls in the transactions. Unlike private function calls, which are resolved in the client, public functions require access to the latest data trees, so they are executed by the sequencer, much like in any non-private L2. + +The **simulator** is an interface to the wasm implementations of the circuits used by the sequencer. + +The **sequencer** pulls txs from the P2P pool, orchestrates all the components above to assemble and publish a block, and updates the world state database. + +## Circuits + +What circuits does the sequencer depend on? 
+ +The **public circuit** is responsible for proving the execution of Brillig (public function bytecode). At the moment, we are using a fake version that actually runs ACIR (intermediate representation for private functions) and does not emit any proofs. + +The **public kernel circuit** then validates the output of the public circuit, and outputs a set of changes to the world state in the same format as the private kernel circuit, meaning we get a standard representation for all txs, regardless of whether public or private functions (or both) were run. The kernel circuits are run iteratively for every recursive call in the transaction. + +The **base rollup circuit** aggregates the changes from two txs (more precisely, the outputs from their kernel circuits once all call stacks are emptied) into a single output. + +The **merge rollup circuit** aggregates two outputs from base rollup circuits into a single one. This circuit is executed recursively until only two outputs are left. This setup means that an L2 block always needs to contain a power-of-two number of txs; if there are not enough, then empty txs are added. + +The **root rollup circuit** consumes two outputs from base or merge rollups and outputs the data to assemble an L2 block. The L1 rollup contract then verifies the proof from this circuit, which implies that all txs included in it were correct. + +## Source code + +You can view the current implementation on Github [here](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/sequencer-client). 
+ +## Further Reading + +- [Sequencer Selection](../../advanced/sequencer_selection.md) diff --git a/docs/docs/concepts/foundation/state_model.md b/docs/docs/concepts/foundation/state_model.md index 5041e310ca6..1ecc52700bb 100644 --- a/docs/docs/concepts/foundation/state_model.md +++ b/docs/docs/concepts/foundation/state_model.md @@ -2,9 +2,13 @@ title: State Model --- -import Disclaimer from '../../misc/common/\_disclaimer.mdx'; +Aztec has a hybrid public/private state model. Aztec contract developers can specify which data is public and which data is private, as well as the functions that can operate on that data. - +## Public State + +Aztec has public state that will be familiar to developers that have worked on other blockchains. Public state is transparent and is managed by the associated smart contract logic. + +Internal to the Aztec network, public state is stored and updated by the sequencer. The sequencer executes state transitions, generates proofs of correct execution (or delegates proof generation to the prover network), and publishes the associated data to Ethereum. ## Private State @@ -27,9 +31,8 @@ This is achieved with two main features: 1. Users sign over transactions, not over specific UTXO's 2. Aztec.nr contracts support developer defined `unconstrained` getter functions to help dApp's make sense of UTXO's. e.g `getBalance()`. These functions can be called outside of a transaction context to read private state. 
-## Coming soon +## To be documented soon -- Public State - The lifecycle of a note - Custom notes - Injection of data by the kernel diff --git a/docs/docs/concepts/foundation/transactions.md b/docs/docs/concepts/foundation/transactions.md index d6cdc181661..5b5974802fa 100644 --- a/docs/docs/concepts/foundation/transactions.md +++ b/docs/docs/concepts/foundation/transactions.md @@ -2,12 +2,29 @@ title: Transactions --- -import Disclaimer from '../../misc/common/\_disclaimer.mdx'; +Sending a transaction - +See [this diagram](https://raw.githubusercontent.com/AztecProtocol/aztec-packages/2fa143e4d88b3089ebbe2a9e53645edf66157dc8/docs/static/img/sandbox_sending_a_tx.svg) for an in-depth overview of the transaction execution process. It highlights 3 different types of transaction execution: contract deployments, private transactions and public transactions. -See [here](https://miro.com/app/board/uXjVMQbDwNk=/?share_link_id=47681418582) for a gigantic diagram (readonly) showing the flow from user to L2, to L1, back to user. +See the page on [contract communication](./communication/main.md) for more context on transactions execution. -> Note: the protocol and its implementation are rapidly evolving, so some info in this diagram will be out of date. +## Enabling Transaction Semantics: The Aztec Kernel -See the page on [contract communication](./communication/main.md) for more context on transactions execute. +There are two kernel circuits in Aztec, the private kernel and the public kernel. Each circuit validates the correct execution of a particular function call. + +A transaction is built up by generating proofs for multiple recursive iterations of kernel circuits. Each call in the call stack is modelled as new iteration of the kernel circuit and are managed by a [FIFO]() queue containing pending function calls. There are two call stacks, one for private calls and one for public calls. 
+ +One iteration of a kernel circuit will pop a call off of the stack and execute the call. If the call triggers subsequent contract calls, these are pushed onto the stack. + +Private kernel proofs are generated first. The transaction is ready to move to the next phase when the private call stack is empty. + +The public kernel circuit takes in proof of a public/private kernel circuit with an empty private call stack, and operates recursively until the public call stack is also empty. + +A transaction is considered complete when both call stacks are empty. + +The only information leaked about the transaction is: + +1. The number of private state updates triggered +2. The set of public calls generated + +The addresses of all private calls are hidden from observers. diff --git a/docs/docs/dev_docs/cli/main.md b/docs/docs/dev_docs/cli/main.md index 8f6d0a584ce..8abcec843d5 100644 --- a/docs/docs/dev_docs/cli/main.md +++ b/docs/docs/dev_docs/cli/main.md @@ -52,7 +52,7 @@ We have shipped a number of example contracts in the `@aztec/noir-contracts` npm You can see all of our example contracts in the monorepo [here](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/noir-contracts/src/contracts). -In the following sections there will be commands that require contracts as options. You can either specify the full directory path to the contract abi, or you can use the name of one of these examples as the option value. This will become clearer later on. +In the following sections there will be commands that require contracts as options. You can either specify the full directory path to the contract artifact, or you can use the name of one of these examples as the option value. This will become clearer later on. ## Creating Accounts @@ -101,7 +101,7 @@ export CONTRACT_ADDRESS= If you use a different address in the constructor above, you will get an error when running the deployment. 
This is because you need to register an account in the sandbox before it can receive private notes. When you create a new account, it gets automatically registered. Alternatively, you can register an account you do not own along with its public key using the `register-recipient` command. ::: -This command takes 1 mandatory positional argument which is the path to the contract ABI file in a JSON format (e.g. `contracts/target/PrivateToken.json`). +This command takes 1 mandatory positional argument which is the path to the contract artifact file in a JSON format (e.g. `contracts/target/PrivateToken.json`). Alternatively you can pass the name of an example contract as exported by `@aztec/noir-contracts` (run `aztec-cli example-contracts` to see the full list of contracts available). The command takes a few optional arguments while the most important one is: @@ -121,7 +121,7 @@ When we deployed the token contract, an initial supply of tokens was minted to t The `call` command calls a read-only method on a contract, one that will not generate a transaction to be sent to the network. The arguments here are: - `--args` - The address for which we want to retrieve the balance. -- `--contract-abi` - The abi of the contract we are calling. +- `--contract-artifact` - The artifact of the contract we are calling. - `--contract-address` - The address of the deployed contract As you can see from the result, this address has a balance of 1000000, as expected. When using the Sandbox, you are able to query the balance of any account that has been created in the system, even the accounts created by default. You may wonder why this is, as you haven't provided the private keys for these accounts. The Sandbox contains a component known as the Private Execution Environment (PXE). When an account is created, this component stores the provided encryption private key and is able to read the account's private state meaning that the Sandbox can report the balance of any of it's accounts. 
More information about the account model can be found [here](../../concepts/foundation/accounts/main.md). @@ -135,7 +135,7 @@ We can now send a transaction to the network. We will transfer funds from the ow We called the `transfer` function of the contract and provided these arguments: - `--args` - The list of arguments to the function call. -- `--contract-abi` - The abi of the contract to call. +- `--contract-artifact` - The artifact of the contract to call. - `--contract-address` - The deployed address of the contract to call. - `--private-key` - The private key of the sender diff --git a/docs/docs/dev_docs/contracts/compiling.md b/docs/docs/dev_docs/contracts/compiling.md index a9c1a346954..95b0388e4ca 100644 --- a/docs/docs/dev_docs/contracts/compiling.md +++ b/docs/docs/dev_docs/contracts/compiling.md @@ -28,11 +28,11 @@ Then run the `compile` command with the path to your [contract project folder](. aztec-cli compile ./path/to/my_aztec_contract_project ``` -This will output a JSON [artifact](./artifacts.md) for each contract in the project to a `target` folder containing their ABI, which you can use for deploying or interacting with your contracts. +This will output a JSON [artifact](./artifacts.md) for each contract in the project to a `target` folder containing their artifact, which you can use for deploying or interacting with your contracts. ### Typescript Interfaces -You can use the compiler to autogenerate type-safe typescript classes for each of your contracts. These classes define type-safe methods for deploying and interacting with your contract based on their ABI. +You can use the compiler to autogenerate type-safe typescript classes for each of your contracts. These classes define type-safe methods for deploying and interacting with your contract based on their artifact. 
To generate them, include a `--typescript` option in the compile command with a path to the target folder for the typescript files: @@ -77,7 +77,7 @@ Read more about interacting with contracts using `aztec.js` [here](../getting_st An Aztec.nr contract can [call a function](./syntax/functions.md) in another contract via `context.call_private_function` or `context.call_public_function`. However, this requires manually assembling the function selector and manually serialising the arguments, which is not type-safe. -To make this easier, the compiler can generate contract interface structs that expose a convenience method for each function listed in a given contract ABI. These structs are intended to be used from another contract project that calls into the current one. For each contract, two interface structs are generated: one to be used from private functions with a `PrivateContext`, and one to be used from open functions with a `PublicContext`. +To make this easier, the compiler can generate contract interface structs that expose a convenience method for each function listed in a given contract artifact. These structs are intended to be used from another contract project that calls into the current one. For each contract, two interface structs are generated: one to be used from private functions with a `PrivateContext`, and one to be used from open functions with a `PublicContext`. To generate them, include a `--interface` option in the compile command with a path to the target folder for the generated Aztec.nr interface files: @@ -140,8 +140,8 @@ You can also programmatically access the compiler via the `@aztec/noir-compiler` The compiler exposes the following functions: - `compileUsingNargo`: Compiles an Aztec.nr project in the target folder using the `nargo` binary available on the shell `PATH` and returns the generated ABIs. -- `generateTypescriptContractInterface`: Generates a typescript class for the given contract ABI. 
-`generateNoirContractInterface`: Generates a Aztec.nr interface struct for the given contract ABI. +- `generateTypescriptContractInterface`: Generates a typescript class for the given contract artifact. +- `generateNoirContractInterface`: Generates an Aztec.nr interface struct for the given contract artifact. ## Next steps diff --git a/docs/docs/dev_docs/contracts/deploying.md b/docs/docs/dev_docs/contracts/deploying.md index 0753fa10c61..303196229e1 100644 --- a/docs/docs/dev_docs/contracts/deploying.md +++ b/docs/docs/dev_docs/contracts/deploying.md @@ -19,7 +19,7 @@ import TabItem from '@theme/TabItem'; ```bash -aztec-cli deploy /path/to/contract/abi.json +aztec-cli deploy /path/to/contract/artifact.json ``` @@ -31,7 +31,7 @@ Pre-requisite - Generate type-safe typescript classes for your contract when com import { readFileSync, writeFileSync } from 'fs'; import { compileUsingNargo, generateTypescriptContractInterface} from '@aztec/noir-compiler'; -const compiled: ContractAbi[] = await compileUsingNargo(projectPathToContractFolder); +const compiled: ContractArtifact[] = await compileUsingNargo(projectPathToContractFolder); const abiImportPath = "../target/Example.json"; writeFileSync(tsInterfaceDestFilePath, generateTypescriptContractInterface(compiled[0], abiImportPath)); ``` @@ -54,7 +54,7 @@ There are several optional arguments that can be passed: -`aztec-cli deploy` takes 1 mandatory argument which is the path to the contract ABI file in a JSON format (e.g. `contracts/target/PrivateToken.json`). Alternatively you can pass the name of an example contract as exported by `@aztec/noir-contracts` (run `aztec-cli example-contracts` to see the full list of contracts available). +`aztec-cli deploy` takes 1 mandatory argument which is the path to the contract artifact file in a JSON format (e.g. `contracts/target/PrivateToken.json`). 
Alternatively you can pass the name of an example contract as exported by `@aztec/noir-contracts` (run `aztec-cli example-contracts` to see the full list of contracts available). The command also takes the following optional arguments: - `-args ` (default: `[]`): Arguments to pass to the contract constructor. @@ -128,7 +128,7 @@ Once the recipient is registered we can deploy the contract: ```bash -aztec-cli deploy PrivateTokenContractAbi --args 1000 0x147392a39e593189902458f4303bc6e0a39128c5a1c1612f76527a162d36d529 +aztec-cli deploy PrivateTokenContractArtifact --args 1000 0x147392a39e593189902458f4303bc6e0a39128c5a1c1612f76527a162d36d529 ``` @@ -154,7 +154,7 @@ If we pass the salt as an argument: ```bash -aztec-cli deploy PrivateTokenContractAbi --args 1000 0x147392a39e593189902458f4303bc6e0a39128c5a1c1612f76527a162d36d529 --salt 0x123 +aztec-cli deploy PrivateTokenContractArtifact --args 1000 0x147392a39e593189902458f4303bc6e0a39128c5a1c1612f76527a162d36d529 --salt 0x123 ``` diff --git a/docs/docs/dev_docs/contracts/resources/common_patterns/authwit.md b/docs/docs/dev_docs/contracts/resources/common_patterns/authwit.md new file mode 100644 index 00000000000..6beb7ea887a --- /dev/null +++ b/docs/docs/dev_docs/contracts/resources/common_patterns/authwit.md @@ -0,0 +1,232 @@ +--- +title: Authentication Witness +description: Developer Documentation to use Authentication Witness for authentication actions on Aztec. +--- +## Prerequisite reading +- [Authwit from Foundational Concepts](./../../../../concepts/foundation/accounts/authwit.md) + +## Introduction + +Authentication Witness is a scheme for authentication actions on Aztec, so users can allow third-parties (eg protocols or other users) to execute an action on their behalf. + +How it works logically is explained in the [foundational concepts](./../../../../concepts/foundation/accounts/authwit.md) but we will do a short recap here. 
+ +An authentication witness is defined for a specific action, such as allowing a Defi protocol to transfer funds on behalf of the user. An action is here something that could be explained as `A is allowed to perform X operation on behalf of B` and we define it as a hash computed as such: + +```rust +authentication_witness_action = H( + caller: AztecAddress, + contract: AztecAddress, + selector: Field, + argsHash: Field +); + +// Example action that authenticates: +// defi contract to transfer 1000 tokens to itself on behalf of alice_account +authentication_witness_action = H( + defi, + token, + transfer_selector, + H(alice_account, defi, 1000) +); +``` + +Given the action, the developer can ask the `on_behalf_of` account contract if the action is authenticated or not. + +```mermaid +sequenceDiagram + actor Alice + participant AC as Alice Account + participant Token + Alice->>AC: Defi.deposit(Token, 1000); + activate AC + AC->>Defi: deposit(Token, 1000); + activate Defi + Defi->>Token: transfer(Alice, Defi, 1000); + activate Token + Token->>AC: Check if Defi may call transfer(Alice, Defi, 1000); + AC-->>Alice: Please give me AuthWit for DeFi
calling transfer(Alice, Defi, 1000); + activate Alice + Alice-->>Alice: Produces Authentication witness + Alice-->>AC: AuthWit for transfer(Alice, Defi, 1000); + AC->>Token: AuthWit validity + deactivate Alice + Token->>Token: throw if invalid AuthWit + Token->>Token: transfer(Alice, Defi, 1000); + Token->>Defi: success + deactivate Token + Defi->>Defi: deposit(Token, 1000); + deactivate Defi + deactivate AC +``` + +:::info +Note in particular that the request for a witness is done by the token contract, and the user will have to provide it to the contract before it can continue execution. Since the request is made all the way into the contract where it is to be used, we don't need to pass it along as an extra input to the functions before it which gives us a cleaner interface. +::: + +As part of `AuthWit` we are assuming that the `on_behalf_of` implements the private and/or public functions: + +```rust +#[aztec(private)] +fn is_valid(message_hash: Field) -> Field; + +#[aztec(public)] +fn is_valid_public(message_hash: Field) -> Field; +``` + +Both return the value `0xe86ab4ff` (`is_valid` selector) for a successful authentication, and `0x00000000` for a failed authentication. You might be wondering why we are expecting the return value to be a selector instead of a boolean. This is mainly to account for a case of selector collisions where the same selector is used for different functions, and we don't want an account to mistakenly allow a different function to be called on its behalf - it is hard to return the selector by mistake, but you might have other functions returning a bool. + +## The `AuthWit` library. + +As part of [Aztec.nr](https://aztec.nr), we are providing a library that can be used to implement authentication witness for your contracts. + +This library also provides a basis for account implementations such that these can more easily implement authentication witness. 
For more on the wallets, see [writing an account contract](./../../../wallets/writing_an_account_contract.md). + +For our purposes here (not building a wallet), the most important part of the library is the `auth` utility which exposes a couple of helper methods for computing the action hash, retrieving witnesses, validating them and emitting the nullifier. + +### General utilities + +The primary general utility is the `compute_authwit_message_hash` function which computes the action hash from its components. This is useful for when you need to generate a hash that is not for the current call, such as when you want to update a public approval state value that is later used for [authentication in public](#updating-approval-state-in-noir). + +#include_code compute_authwit_message_hash /yarn-project/aztec-nr/authwit/src/auth.nr rust + +#### TypeScript utilities + +To make it convenient to compute the message hashes in TypeScript, the `aztec.js` package includes a `computeAuthWitMessageHash` function that you can use. + +#include_code authwit_computeAuthWitMessageHash /yarn-project/aztec.js/src/utils/authwit.ts typescript + +As you can see above, this function takes a `caller` and a `request`. So let's quickly see how we can get those. Luckily for us, the `request` can be easily prepared similarly to how we are making contract calls from TypeScript. + +#include_code authwit_computeAuthWitMessageHash /yarn-project/end-to-end/src/e2e_token_contract.test.ts typescript + +### Utilities for private calls + +For private calls where we allow execution on behalf of others, we generally want to check if the current call is authenticated by `on_behalf_of`. To easily do so, we can use the `assert_current_call_valid_authwit` which fetches information from the current context without us needing to provide much beyond the `on_behalf_of`. 
+ +#include_code assert_current_call_valid_authwit /yarn-project/aztec-nr/authwit/src/auth.nr rust + +As seen above, we mainly compute the message hash, and then forward the call to the more generic `assert_valid_authwit`. This validating function will then: + +- make a call to `on_behalf_of` to validate that the call is authenticated +- emit a nullifier for the action to prevent replay attacks +- throw if the action is not authenticated by `on_behalf_of` + +#include_code assert_valid_authwit /yarn-project/aztec-nr/authwit/src/auth.nr rust + +### Utilities for public calls + +Very similar to above, we have variations that work in the public domain. These functions are wrapped to give a similar flow for both cases, but behind the scenes the logic of the account contracts is slightly different since they cannot use the oracle as they are not in the private domain. + +#include_code assert_current_call_valid_authwit_public /yarn-project/aztec-nr/authwit/src/auth.nr rust + +#include_code assert_valid_authwit_public /yarn-project/aztec-nr/authwit/src/auth.nr rust + +## Usage + +Ok, enough talking, how the hell do we use this? + +### Importing it + +To add it to your project, add the `authwit` library to your `Nargo.toml` file. + +```toml +[dependencies] +aztec = { git="https://github.com/AztecProtocol/aztec-packages/", tag="#include_aztec_version", directory="yarn-project/aztec-nr/aztec" } +authwit = { git="https://github.com/AztecProtocol/aztec-packages/", tag="#include_aztec_version", directory="yarn-project/aztec-nr/authwit"} +``` + +Then you will be able to import it into your contracts as follows. 
+ +#include_code import_authwit /yarn-project/noir-contracts/src/contracts/token_contract/src/main.nr rust + +### Private Functions + +#### Checking if the current call is authenticated + +Based on the diagram earlier on this page let's take a look at how we can implement the `transfer` function such that it checks if the tokens are to be transferred `from` the caller or needs to be authenticated with an authentication witness. + +#include_code transfer /yarn-project/noir-contracts/src/contracts/token_contract/src/main.nr rust + +The first thing we see in the snippet above, is that if `from` is not the call we are calling the `assert_current_call_valid_authwit` function from [earlier](#private-functions). If the call is not throwing, we are all good and can continue with the transfer. + +In the snippet we are constraining the `else` case such that only `nonce = 0` is supported. This is not strictly necessary, but because I can't stand dangling useless values. By making it constrained, we can limit what people guess it does, I hope. + +#### Authenticating an action in TypeScript + +Cool, so we have a function that checks if the current call is authenticated, but how do we actually authenticate it? Well, assuming that we use a wallet that is following the spec, we import `computeAuthWitMessageHash` from `aztec.js` to help us compute the hash, and then we simply `addAuthWitness` to the wallet. Behind the scenes this will make the witness available to the oracle. + +#include_code authwit_transfer_example /yarn-project/end-to-end/src/e2e_token_contract.test.ts typescript + +### Public Functions + +With private functions covered, how can we use this in a public function? Well, the answer is that we simply change one name of a function and then we are good to go :eyes: (almost). 
+ +#### Checking if the current call is authenticated + +#include_code transfer_public /yarn-project/noir-contracts/src/contracts/token_contract/src/main.nr rust + +#### Authenticating an action in TypeScript + +Authenticating an action in the public domain is quite similar to the private domain, with the difference that we are executing a function on the account contract to add the witness, if you recall, this is because we don't have access to the oracle in the public domain. + +In the snippet below, this is done as a separate contract call, but can also be done as part of a batch as mentioned in the [foundational concepts](./../../../../concepts/foundation/accounts/authwit.md#what-about-public). + +#include_code authwit_public_transfer_example /yarn-project/end-to-end/src/e2e_token_contract.test.ts typescript + +#### Updating approval state in Noir + +We have cases where we need a non-wallet contract to approve an action to be executed by another contract. One of the cases could be when making more complex defi where funds are passed along. When doing so, we need the intermediate contracts to support approving of actions on their behalf. + +To support this, we must implement the `is_valid_public` function as seen in the snippet below. + +#include_code authwit_uniswap_get /yarn-project/noir-contracts/src/contracts/uniswap_contract/src/main.nr rust + +It also needs a way to update those storage values. Since we want the updates to be trustless, we can compute the action based on the function inputs, and then have the contract compute the key at which it must add a `true` to approve the action. + +An example of this would be our Uniswap example which performs a cross chain swap on L1. In here, we both do private and public auth witnesses, where the public is set by the uniswap L2 contract itself. In the below snippet, you can see that we compute the action hash, and then update an `approved_action` mapping with the hash as key and `true` as value. 
When we then call the `token_bridge` to execute afterwards, it reads this value, burns the tokens, and consumes the authentication. + +#include_code authwit_uniswap_set /yarn-project/noir-contracts/src/contracts/uniswap_contract/src/main.nr rust + +Outlining more of the `swap` flow: this simplified diagram shows how it will look for contracts that are not wallets but also need to support authentication witnesses. + +```mermaid +sequenceDiagram + actor A as Alice + participant AC as Alice Account + participant CC as Crosschain Swap + participant TB as Token Bridge + participant T as Token + + A->>AC: Swap 1000 token A to B on Uniswap L1 + activate AC; + AC->>CC: Swap 1000 token A to B + activate CC; + CC->>T: unshield 1000 tokens from Alice Account to CCS + activate T; + T->>AC: Have you approved this?? + AC-->>A: Please give me an AuthWit + A-->>AC: Here is AuthWit + AC-->>AC: Validate AuthWit + AC->>T: Yes + deactivate T; + CC-->>CC: Setting flag to true + CC->>TB: Exit 1000 tokens to CCS + activate TB; + TB->>T: Burn 1000 tokens from CCS + activate T; + T->>CC: Have you approved this? + CC->>T: Yes + T-->>T: Burn + T->>TB: success + deactivate T; + TB-->>TB: Emit L2->L1 message + deactivate TB; + CC-->>CC: Emit L2->L1 message + deactivate CC; + deactivate AC; +``` + +:::info **TODO** +Add a link to the blog-posts. 
+::: diff --git a/docs/docs/dev_docs/getting_started/sandbox.md b/docs/docs/dev_docs/getting_started/sandbox.md index c855b41f545..6d4e2bf1eca 100644 --- a/docs/docs/dev_docs/getting_started/sandbox.md +++ b/docs/docs/dev_docs/getting_started/sandbox.md @@ -128,8 +128,8 @@ yarn add @aztec/aztec.js @aztec/noir-contracts typescript @types/node "start": "yarn build && DEBUG='token' node ./dest/index.js" }, "dependencies": { - "@aztec/aztec.js": "^0.8.7", - "@aztec/noir-contracts": "^0.8.7", + "@aztec/aztec.js": "latest", + "@aztec/noir-contracts": "latest", "@types/node": "^20.6.3", "typescript": "^5.2.2" } @@ -264,6 +264,7 @@ In this section, we created 2 instances of the `TokenContract` contract abstract We can see that each account has the expected balance of tokens. ### Diagram of calling an unconstrained (view) function + Unconstrained function call ## Creating and submitting transactions @@ -354,6 +355,7 @@ That's it! We have successfully deployed a private token contract to an instance You can find the [complete tutorial code here](https://github.com/AztecProtocol/dev-rel/tree/main/tutorials/sandbox-tutorial/token). 
### Diagram of sending a transaction + Sending a transaction ## Next Steps diff --git a/docs/docs/dev_docs/tutorials/writing_dapp/contract_deployment.md b/docs/docs/dev_docs/tutorials/writing_dapp/contract_deployment.md index d91c0b3db30..5e4557e2dc8 100644 --- a/docs/docs/dev_docs/tutorials/writing_dapp/contract_deployment.md +++ b/docs/docs/dev_docs/tutorials/writing_dapp/contract_deployment.md @@ -64,7 +64,7 @@ Create a new file `src/deploy.mjs`: // src/deploy.mjs import { writeFileSync } from 'fs'; import { Contract, ContractDeployer, createPXEClient, getSandboxAccountsWallets } from '@aztec/aztec.js'; -import TokenContractAbi from "../contracts/token/target/Token.json" assert { type: "json" }; +import TokenContractArtifact from "../contracts/token/target/Token.json" assert { type: "json" }; #include_code dapp-deploy yarn-project/end-to-end/src/sample-dapp/deploy.mjs raw diff --git a/docs/docs/dev_docs/tutorials/writing_dapp/contract_interaction.md b/docs/docs/dev_docs/tutorials/writing_dapp/contract_interaction.md index daed14e63fb..4d3d2ae0e22 100644 --- a/docs/docs/dev_docs/tutorials/writing_dapp/contract_interaction.md +++ b/docs/docs/dev_docs/tutorials/writing_dapp/contract_interaction.md @@ -18,7 +18,7 @@ To do this, let's first initialize a new `Contract` instance using `aztec.js` th // src/contracts.mjs import { Contract } from "@aztec/aztec.js"; import { readFileSync } from "fs"; -import TokenContractAbi from "../contracts/token/target/Token.json" assert { type: "json" }; +import TokenContractArtifact from "../contracts/token/target/Token.json" assert { type: "json" }; ``` And then add the following code for initialising the `Contract` instances: diff --git a/docs/docs/dev_docs/tutorials/writing_dapp/testing.md b/docs/docs/dev_docs/tutorials/writing_dapp/testing.md index ff9732be801..4c1a5e947b6 100644 --- a/docs/docs/dev_docs/tutorials/writing_dapp/testing.md +++ b/docs/docs/dev_docs/tutorials/writing_dapp/testing.md @@ -27,7 +27,7 @@ Create a new 
file `src/index.test.mjs` with the imports we'll be using and an em ```js import { createSandbox } from "@aztec/aztec-sandbox"; import { Contract, createAccount } from "@aztec/aztec.js"; -import TokenContractAbi from "../contracts/token/target/Token.json" assert { type: "json" }; +import TokenContractArtifact from "../contracts/token/target/Token.json" assert { type: "json" }; describe("token", () => {}); ``` diff --git a/docs/docs/intro.md b/docs/docs/intro.md index 455a99adee2..f7dce2c8e8f 100644 --- a/docs/docs/intro.md +++ b/docs/docs/intro.md @@ -2,6 +2,7 @@ slug: "/" id: "intro" title: What is Aztec? +description: "Aztec introduces a privacy-centric zkRollup solution for Ethereum, enhancing confidentiality and scalability within the Ethereum ecosystem." --- # Aztec: Ethereum, encrypted diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 19ab13a3b20..945bcc5ccf5 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -10,8 +10,9 @@ const fs = require("fs"); /** @type {import('@docusaurus/types').Config} */ const config = { - title: "Aztec Docs", - tagline: "Ethereum, encrypted", + title: "Privacy-first zkRollup | Aztec Documentation", + tagline: + "Aztec introduces a privacy-centric zkRollup solution for Ethereum, enhancing confidentiality and scalability within the Ethereum ecosystem.", url: "https://docs.aztec.network/", baseUrl: "/", trailingSlash: false, @@ -159,6 +160,7 @@ const config = { content: "aztec, noir, privacy, encrypted, ethereum, blockchain", }, ], + image: "img/docs-preview-image.png", algolia: { appId: "CL4NK79B0W", apiKey: "21d89dadaa37a4d1b6bf4b17978dcf7f", diff --git a/docs/internal_notes/dev_docs/sandbox/components.md b/docs/internal_notes/dev_docs/sandbox/components.md index cc3f335c784..58ba7718f20 100644 --- a/docs/internal_notes/dev_docs/sandbox/components.md +++ b/docs/internal_notes/dev_docs/sandbox/components.md @@ -89,9 +89,9 @@ Responsibilities: These tasks are lower priority than providing 
a handcrafted ABI. - The ability for a dev to enclose a collection of Aztec.nr functions in a 'contract scope'. -- The ability to create an Aztec.nr contract abi from the above. +- The ability to create an Aztec.nr contract artifact from the above. -Design an Aztec.nr Contract ABI, similar to a Solidity ABI which is output by Solc (see [here](https://docs.soliditylang.org/en/v0.8.13/abi-spec.html#json)). It might include for each function: +Design an Aztec.nr contract artifact, similar to a Solidity ABI which is output by Solc (see [here](https://docs.soliditylang.org/en/v0.8.13/abi-spec.html#json)). It might include for each function: - ACIR opcodes (akin to Solidity bytecode). - Function name and parameter names & types. @@ -113,7 +113,7 @@ aztec.js should always be stateless. It offers the ability to interact with stat The analogous AC component would be the AztecSdk (wraps the CoreSdk which is more analogous to the private client). - Allows a user to create an Aztec keypair. Call `create_account` on Wallet. -- Create a `Contract` instance (similar to web3.js), given a path to an Aztec.nr Contract ABI. +- Create a `Contract` instance (similar to web3.js), given a path to an Aztec.nr contract artifact. - Construct `tx_request` by calling e.g. `contract.get_deployment_request(constructor_args)`. - Call wallet `sign_tx_request(tx_request)` to get signature. - Call `simulate_tx(signed_tx_request)` on the Private Client. In future this would help compute gas, for now we won't actually return gas (it's hard). Returns success or failure, so client knows if it should proceed, and computed kernel circuit public outputs. 
diff --git a/docs/sidebars.js b/docs/sidebars.js index ab75b2abe2a..3327fbf99e8 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -80,11 +80,13 @@ const sidebars = { label: "Accounts", type: "category", link: { type: "doc", id: "concepts/foundation/accounts/main" }, - items: ["concepts/foundation/accounts/keys"], + items: [ + "concepts/foundation/accounts/keys", + "concepts/foundation/accounts/authwit", + ], }, "concepts/foundation/contracts", "concepts/foundation/transactions", - // "concepts/foundation/blocks", // "concepts/foundation/globals", { label: "Communication", @@ -98,19 +100,19 @@ const sidebars = { "concepts/foundation/communication/cross_chain_calls", ], }, - // { - // label: "Nodes and Clients", - // type: "category", - // link: { - // type: "doc", - // id: "concepts/foundation/nodes_clients/main", - // }, - // items: [ - // "concepts/foundation/nodes_clients/execution_client", - // "concepts/foundation/nodes_clients/prover_client", - // "concepts/foundation/nodes_clients/sequencer_client", - // ], - // }, + { + label: "Nodes and Clients", + type: "category", + // link: { + // type: "doc", + // id: "concepts/foundation/nodes_clients/main", + // }, + items: [ + // "concepts/foundation/nodes_clients/execution_client", + // "concepts/foundation/nodes_clients/prover_client", + "concepts/foundation/nodes_clients/sequencer", + ], + }, // "concepts/foundation/block_production", // "concepts/foundation/upgrade_mechanism", ], @@ -169,6 +171,8 @@ const sidebars = { }, "concepts/advanced/public_vm", "concepts/advanced/contract_creation", + "concepts/advanced/sequencer_selection", + "concepts/advanced/acir_simulator", ], }, @@ -272,27 +276,28 @@ const sidebars = { ], }, "dev_docs/contracts/common_errors", - // { - // label: "Resources", - // type: "category", - // items: [ - // "dev_docs/contracts/resources/style_guide", - // { - // label: "Common Patterns", - // type: "category", + { + label: "Resources", + type: "category", + items: [ + 
//"dev_docs/contracts/resources/style_guide", + { + label: "Common Patterns", + type: "category", // link: { // type: "doc", // id: "dev_docs/contracts/resources/common_patterns/main", // }, - // items: [ + items: [ + "dev_docs/contracts/resources/common_patterns/authwit", // "dev_docs/contracts/resources/common_patterns/sending_tokens_to_user", // "dev_docs/contracts/resources/common_patterns/sending_tokens_to_contract", // "dev_docs/contracts/resources/common_patterns/access_control", // "dev_docs/contracts/resources/common_patterns/interacting_with_l1", - // ], - // }, - // ], - // }, + ], + }, + ], + }, // { // label: "Security Considerations", // type: "category", diff --git a/docs/static/img/aztec_high_level_network_architecture.png b/docs/static/img/aztec_high_level_network_architecture.png new file mode 100644 index 00000000000..db7401112ff Binary files /dev/null and b/docs/static/img/aztec_high_level_network_architecture.png differ diff --git a/docs/static/img/aztec_high_level_network_architecture.svg b/docs/static/img/aztec_high_level_network_architecture.svg new file mode 100644 index 00000000000..9e3aca8d6b2 --- /dev/null +++ b/docs/static/img/aztec_high_level_network_architecture.svg @@ -0,0 +1,17 @@ + + + + + + + + Aztec NetworkPrivate Execution Environment (PXE)UserAztec.jsACIR SimulatorAztec NodeSequencerEthereumRollup ContractProver NetworkProverProverProverProverDatabasePrivate Kernel Circuit \ No newline at end of file diff --git a/docs/static/img/docs-preview-image.png b/docs/static/img/docs-preview-image.png new file mode 100644 index 00000000000..6708c95d794 Binary files /dev/null and b/docs/static/img/docs-preview-image.png differ diff --git a/docs/static/img/sandbox_unconstrained_function.svg b/docs/static/img/sandbox_unconstrained_function.svg index 947e30edf8d..8ae15542142 100644 --- a/docs/static/img/sandbox_unconstrained_function.svg +++ b/docs/static/img/sandbox_unconstrained_function.svg @@ -7,7 +7,7 @@ width="1684.16" 
height="537.27997" viewBox="0 0 1684.16 537.27997" - sodipodi:docname="sandbox_view_call.svg" + sodipodi:docname="sandbox_unconstrained_function.svg" inkscape:version="1.3 (0e150ed, 2023-07-21)" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" @@ -37,8 +37,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath6"> @@ -46,8 +46,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath8"> @@ -56,7 +56,7 @@ id="clipPath10"> @@ -64,8 +64,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath12"> @@ -73,8 +73,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath14"> @@ -83,7 +83,7 @@ id="clipPath16"> @@ -91,8 +91,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath18"> @@ -101,7 +101,7 @@ id="clipPath20"> @@ -109,8 +109,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath22"> @@ -119,7 +119,7 @@ id="clipPath24"> @@ -127,15 +127,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath25"> @@ -160,7 +160,7 @@ id="clipPath32"> @@ -169,7 +169,7 @@ id="clipPath34"> @@ -178,7 +178,7 @@ id="clipPath36"> @@ -187,7 +187,7 @@ id="clipPath38"> @@ -196,7 +196,7 @@ id="clipPath40"> @@ -205,7 +205,7 @@ id="clipPath42"> @@ -214,7 +214,7 @@ id="clipPath44"> @@ -223,7 +223,7 @@ id="clipPath46"> @@ -232,7 +232,7 @@ id="clipPath48"> @@ -241,7 +241,7 @@ id="clipPath50"> @@ -250,7 +250,7 @@ id="clipPath52"> @@ -259,7 +259,7 @@ id="clipPath54"> @@ -267,15 +267,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath55"> @@ -300,7 +300,7 @@ id="clipPath62"> @@ -309,7 +309,7 @@ id="clipPath64"> @@ -318,7 +318,7 @@ id="clipPath66"> @@ -327,7 +327,7 @@ id="clipPath68"> @@ -335,15 +335,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath69"> @@ -368,7 +368,7 @@ id="clipPath76"> @@ -377,7 +377,7 @@ id="clipPath78"> @@ -385,15 +385,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath79"> @@ -418,7 +418,7 @@ id="clipPath86"> @@ -427,7 +427,7 @@ id="clipPath88"> @@ -436,7 +436,7 @@ id="clipPath90"> @@ -445,7 +445,7 @@ id="clipPath92"> @@ -454,7 +454,7 @@ id="clipPath94"> @@ 
-463,7 +463,7 @@ id="clipPath96"> @@ -471,15 +471,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath97"> @@ -504,7 +504,7 @@ id="clipPath104"> @@ -512,15 +512,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath105"> @@ -545,7 +545,7 @@ id="clipPath112"> @@ -554,7 +554,7 @@ id="clipPath114"> @@ -562,15 +562,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath115"> @@ -595,7 +595,7 @@ id="clipPath122"> @@ -604,7 +604,7 @@ id="clipPath124"> @@ -613,7 +613,7 @@ id="clipPath126"> @@ -622,7 +622,7 @@ id="clipPath128"> @@ -631,7 +631,7 @@ id="clipPath130"> @@ -640,7 +640,7 @@ id="clipPath132"> @@ -649,7 +649,7 @@ id="clipPath134"> @@ -658,7 +658,7 @@ id="clipPath136"> @@ -667,7 +667,7 @@ id="clipPath138"> @@ -676,7 +676,7 @@ id="clipPath140"> @@ -685,7 +685,7 @@ id="clipPath142"> @@ -694,7 +694,7 @@ id="clipPath144"> @@ -703,7 +703,7 @@ id="clipPath146"> @@ -711,15 +711,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath147"> @@ -744,7 +744,7 @@ id="clipPath154"> @@ -753,7 +753,7 @@ id="clipPath156"> @@ -762,7 +762,7 @@ id="clipPath158"> @@ -770,8 +770,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath160"> @@ -780,7 +780,7 @@ id="clipPath162"> @@ -789,7 +789,7 @@ id="clipPath164"> @@ -797,8 +797,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath166"> @@ -807,7 +807,7 @@ id="clipPath168"> @@ -815,8 +815,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath170"> @@ -825,7 +825,7 @@ id="clipPath172"> @@ -833,8 +833,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath174"> @@ -843,7 +843,7 @@ id="clipPath176"> @@ -852,7 +852,7 @@ id="clipPath178"> @@ -861,7 +861,7 @@ id="clipPath180"> @@ -870,7 +870,7 @@ id="clipPath182"> @@ -879,7 +879,7 @@ id="clipPath184"> @@ -888,7 +888,7 @@ id="clipPath186"> @@ -897,7 +897,7 @@ id="clipPath188"> @@ -906,7 +906,7 @@ id="clipPath190"> @@ -915,7 +915,7 @@ id="clipPath192"> @@ -923,8 +923,8 @@ clipPathUnits="userSpaceOnUse" id="clipPath194"> @@ -933,7 +933,7 @@ id="clipPath196"> @@ -942,7 +942,7 @@ id="clipPath198"> @@ -951,7 +951,7 @@ id="clipPath200"> @@ 
-960,7 +960,7 @@ id="clipPath202"> @@ -969,7 +969,7 @@ id="clipPath204"> @@ -978,7 +978,7 @@ id="clipPath206"> @@ -987,7 +987,7 @@ id="clipPath208"> @@ -996,7 +996,7 @@ id="clipPath210"> @@ -1005,7 +1005,7 @@ id="clipPath212"> @@ -1014,7 +1014,7 @@ id="clipPath214"> @@ -1023,7 +1023,7 @@ id="clipPath216"> @@ -1031,15 +1031,15 @@ clipPathUnits="userSpaceOnUse" id="clipPath217"> @@ -1064,7 +1064,7 @@ id="clipPath224"> @@ -1078,13 +1078,13 @@ inkscape:pageopacity="0.0" inkscape:pagecheckerboard="0" inkscape:deskcolor="#d1d1d1" - inkscape:zoom="0.46515707" - inkscape:cx="860.99949" - inkscape:cy="125.76397" + inkscape:zoom="0.52310942" + inkscape:cx="841.12421" + inkscape:cy="268.58625" inkscape:window-width="1408" - inkscape:window-height="1212" - inkscape:window-x="629" - inkscape:window-y="100" + inkscape:window-height="1200" + inkscape:window-x="782" + inkscape:window-y="35" inkscape:window-maximized="0" inkscape:current-layer="g1"> @@ -1206,109 +1206,109 @@ id="g31" /> @@ -1332,31 +1332,31 @@ id="g61" /> @@ -1380,19 +1380,19 @@ id="g75" /> @@ -1416,55 +1416,55 @@ id="g85" /> @@ -1488,10 +1488,10 @@ id="g103" /> @@ -1515,19 +1515,19 @@ id="g111" /> @@ -1551,100 +1551,100 @@ id="g121" /> @@ -1668,250 +1668,250 @@ id="g153" /> @@ -1935,10 +1935,10 @@ id="g223" /> diff --git a/scripts/ci/aggregate_e2e_benchmark.js b/scripts/ci/aggregate_e2e_benchmark.js index d59a1330910..53eb747cf6c 100644 --- a/scripts/ci/aggregate_e2e_benchmark.js +++ b/scripts/ci/aggregate_e2e_benchmark.js @@ -27,14 +27,26 @@ const { NOTE_SUCCESSFUL_DECRYPTING_TIME, NOTE_TRIAL_DECRYPTING_TIME, NOTE_PROCESSOR_CAUGHT_UP, + L2_BLOCK_BUILT, + L2_BLOCK_BUILD_TIME, + L2_BLOCK_ROLLUP_SIMULATION_TIME, + L2_BLOCK_PUBLIC_TX_PROCESS_TIME, + NODE_HISTORY_SYNC_TIME, + NODE_SYNCED_CHAIN, + NOTE_HISTORY_TRIAL_DECRYPTING_TIME, + NOTE_HISTORY_SUCCESSFUL_DECRYPTING_TIME, + PXE_DB_SIZE, ROLLUP_SIZES, + CHAIN_LENGTHS, BENCHMARK_FILE_JSON, + BLOCK_SIZE, + NODE_DB_SIZE, } = require("./benchmark_shared.js"); // Folder 
where to load logs from const logsDir = process.env.LOGS_DIR ?? `log`; -// Appends a datapoint to the final results for the given metric in the given bucket +// Appends a data point to the final results for the given metric in the given bucket function append(results, metric, bucket, value) { if (value === undefined) { console.error(`Undefined value for ${metric} in bucket ${bucket}`); @@ -79,13 +91,49 @@ function processCircuitSimulation(entry, results) { } // Processes an entry with event name 'note-processor-caught-up' and updates results -// Buckets are rollup sizes +// Buckets are rollup sizes for NOTE_DECRYPTING_TIME, or chain sizes for NOTE_HISTORY_DECRYPTING_TIME function processNoteProcessorCaughtUp(entry, results) { - const { seen, decrypted } = entry; + const { seen, decrypted, blocks, duration, dbSize } = entry; if (ROLLUP_SIZES.includes(decrypted)) - append(results, NOTE_SUCCESSFUL_DECRYPTING_TIME, decrypted, entry.duration); + append(results, NOTE_SUCCESSFUL_DECRYPTING_TIME, decrypted, duration); if (ROLLUP_SIZES.includes(seen) && decrypted === 0) - append(results, NOTE_TRIAL_DECRYPTING_TIME, seen, entry.duration); + append(results, NOTE_TRIAL_DECRYPTING_TIME, seen, duration); + if (CHAIN_LENGTHS.includes(blocks) && decrypted > 0) { + append(results, NOTE_HISTORY_SUCCESSFUL_DECRYPTING_TIME, blocks, duration); + append(results, PXE_DB_SIZE, blocks, dbSize); + } + if (CHAIN_LENGTHS.includes(blocks) && decrypted === 0) + append(results, NOTE_HISTORY_TRIAL_DECRYPTING_TIME, blocks, duration); +} + +// Processes an entry with event name 'l2-block-built' and updates results +// Buckets are rollup sizes +function processL2BlockBuilt(entry, results) { + const bucket = entry.txCount; + if (!ROLLUP_SIZES.includes(bucket)) return; + append(results, L2_BLOCK_BUILD_TIME, bucket, entry.duration); + append( + results, + L2_BLOCK_ROLLUP_SIMULATION_TIME, + bucket, + entry.rollupCircuitsDuration + ); + append( + results, + L2_BLOCK_PUBLIC_TX_PROCESS_TIME, + bucket, + 
entry.publicProcessDuration + ); +} + +// Processes entries with event name node-synced-chain-history emitted by benchmark tests +// Buckets are chain lengths +function processNodeSyncedChain(entry, results) { + const bucket = entry.blockCount; + if (!CHAIN_LENGTHS.includes(bucket)) return; + if (entry.txsPerBlock !== BLOCK_SIZE) return; + append(results, NODE_HISTORY_SYNC_TIME, bucket, entry.duration); + append(results, NODE_DB_SIZE, bucket, entry.dbSize); } // Processes a parsed entry from a logfile and updates results @@ -99,6 +147,10 @@ function processEntry(entry, results) { return processCircuitSimulation(entry, results); case NOTE_PROCESSOR_CAUGHT_UP: return processNoteProcessorCaughtUp(entry, results); + case L2_BLOCK_BUILT: + return processL2BlockBuilt(entry, results); + case NODE_SYNCED_CHAIN: + return processNodeSyncedChain(entry, results); default: return; } diff --git a/scripts/ci/benchmark_shared.js b/scripts/ci/benchmark_shared.js index df0548eab1c..fe3735b4035 100644 --- a/scripts/ci/benchmark_shared.js +++ b/scripts/ci/benchmark_shared.js @@ -1,8 +1,16 @@ -// Rollup sizes to track (duplicated from yarn-project/end-to-end/src/benchmarks/bench_publish_rollup.test.ts) +// Block sizes to track (duplicated from yarn-project/end-to-end/src/benchmarks/bench_publish_rollup.test.ts) const ROLLUP_SIZES = process.env.ROLLUP_SIZES ? process.env.ROLLUP_SIZES.split(",").map(Number) : [8, 32, 128]; +// Block size to use for building chains of multiple length (duplicated from yarn-project/end-to-end/src/benchmarks/bench_process_history.test.ts) +const BLOCK_SIZE = process.env.BLOCK_SIZE ? +process.env.BLOCK_SIZE : 16; + +// Chain lengths to test (duplicated from yarn-project/end-to-end/src/benchmarks/bench_process_history.test.ts) +const CHAIN_LENGTHS = process.env.CHAIN_LENGTHS + ? process.env.CHAIN_LENGTHS.split(",").map(Number) + : [10, 20, 30]; + // Output files const BENCHMARK_FILE_JSON = process.env.BENCHMARK_FILE_JSON ?? 
"benchmark.json"; @@ -15,14 +23,28 @@ module.exports = { CIRCUIT_SIMULATION_TIME: "circuit_simulation_time_in_ms", CIRCUIT_INPUT_SIZE: "circuit_input_size_in_bytes", CIRCUIT_OUTPUT_SIZE: "circuit_output_size_in_bytes", - NOTE_SUCCESSFUL_DECRYPTING_TIME: "note_successful_decrypting_time", - NOTE_TRIAL_DECRYPTING_TIME: "note_unsuccessful_decrypting_time", + NOTE_SUCCESSFUL_DECRYPTING_TIME: "note_successful_decrypting_time_in_ms", + NOTE_TRIAL_DECRYPTING_TIME: "note_trial_decrypting_time_in_ms", + L2_BLOCK_BUILD_TIME: "l2_block_building_time_in_ms", + L2_BLOCK_ROLLUP_SIMULATION_TIME: "l2_block_rollup_simulation_time_in_ms", + L2_BLOCK_PUBLIC_TX_PROCESS_TIME: "l2_block_public_tx_process_time_in_ms", + NODE_HISTORY_SYNC_TIME: "node_history_sync_time_in_ms", + NOTE_HISTORY_SUCCESSFUL_DECRYPTING_TIME: + "note_history_successful_decrypting_time_in_ms", + NOTE_HISTORY_TRIAL_DECRYPTING_TIME: + "note_history_trial_decrypting_time_in_ms", + NODE_DB_SIZE: "node_database_size_in_bytes", + PXE_DB_SIZE: "pxe_database_size_in_bytes", // Events to track L2_BLOCK_PUBLISHED_TO_L1: "rollup-published-to-l1", L2_BLOCK_SYNCED: "l2-block-handled", + L2_BLOCK_BUILT: "l2-block-built", CIRCUIT_SIMULATED: "circuit-simulation", NOTE_PROCESSOR_CAUGHT_UP: "note-processor-caught-up", + NODE_SYNCED_CHAIN: "node-synced-chain-history", // Other ROLLUP_SIZES, + BLOCK_SIZE, + CHAIN_LENGTHS, BENCHMARK_FILE_JSON, }; diff --git a/scripts/ci/comment_e2e_benchmark.js b/scripts/ci/comment_e2e_benchmark.js index 16f6541a048..38c8aea4c06 100644 --- a/scripts/ci/comment_e2e_benchmark.js +++ b/scripts/ci/comment_e2e_benchmark.js @@ -5,34 +5,136 @@ const https = require("https"); const fs = require("fs"); -const GITHUB_TOKEN = process.env.GITHUB_TOKEN; +const GITHUB_TOKEN = process.env.AZTEC_BOT_COMMENTER_GITHUB_TOKEN; const OWNER = "AztecProtocol"; const REPO = "aztec3-packages"; const COMMENT_MARK = ""; -const { ROLLUP_SIZES, BENCHMARK_FILE_JSON } = require("./benchmark_shared.js"); +const { + ROLLUP_SIZES, + 
BLOCK_SIZE, + BENCHMARK_FILE_JSON, + L1_ROLLUP_CALLDATA_SIZE_IN_BYTES, + L1_ROLLUP_CALLDATA_GAS, + L1_ROLLUP_EXECUTION_GAS, + L2_BLOCK_PROCESSING_TIME, + CIRCUIT_SIMULATION_TIME, + CIRCUIT_INPUT_SIZE, + CIRCUIT_OUTPUT_SIZE, + NOTE_SUCCESSFUL_DECRYPTING_TIME, + NOTE_TRIAL_DECRYPTING_TIME, + L2_BLOCK_BUILD_TIME, + L2_BLOCK_ROLLUP_SIMULATION_TIME, + L2_BLOCK_PUBLIC_TX_PROCESS_TIME, + NODE_HISTORY_SYNC_TIME, + NOTE_HISTORY_SUCCESSFUL_DECRYPTING_TIME, + NOTE_HISTORY_TRIAL_DECRYPTING_TIME, + NODE_DB_SIZE, + PXE_DB_SIZE, +} = require("./benchmark_shared.js"); + +const METRICS_GROUPED_BY_ROLLUP_SIZE = [ + L1_ROLLUP_CALLDATA_SIZE_IN_BYTES, + L1_ROLLUP_CALLDATA_GAS, + L1_ROLLUP_EXECUTION_GAS, + L2_BLOCK_PROCESSING_TIME, + NOTE_SUCCESSFUL_DECRYPTING_TIME, + NOTE_TRIAL_DECRYPTING_TIME, + L2_BLOCK_BUILD_TIME, + L2_BLOCK_ROLLUP_SIMULATION_TIME, + L2_BLOCK_PUBLIC_TX_PROCESS_TIME, +]; + +const METRICS_GROUPED_BY_CHAIN_LENGTH = [ + NODE_HISTORY_SYNC_TIME, + NOTE_HISTORY_SUCCESSFUL_DECRYPTING_TIME, + NOTE_HISTORY_TRIAL_DECRYPTING_TIME, + NODE_DB_SIZE, + PXE_DB_SIZE, +]; + +const METRICS_GROUPED_BY_CIRCUIT_NAME = [ + CIRCUIT_SIMULATION_TIME, + CIRCUIT_INPUT_SIZE, + CIRCUIT_OUTPUT_SIZE, +]; + +function formatValue(value) { + return value; +} -// Returns the md content to post -function getContent() { - const benchmark = JSON.parse(fs.readFileSync(BENCHMARK_FILE_JSON, "utf-8")); - delete benchmark.timestamp; +function transpose(obj) { + const transposed = {}; + for (const outerKey in obj) { + const innerObj = obj[outerKey]; + for (const innerKey in innerObj) { + if (!transposed[innerKey]) transposed[innerKey] = {}; + transposed[innerKey][outerKey] = innerObj[innerKey]; + } + } + return transposed; +} + +function pick(benchmark, keys) { + const result = {}; + for (const key of keys) { + result[key] = benchmark[key]; + } + return result; +} - const sizes = ROLLUP_SIZES; - const header = `| Metric | ${sizes.map((i) => `${i} txs`).join(" | ")} |`; - const separator = `| - | ${sizes.map(() 
=> "-").join(" | ")} |`; - const rows = Object.keys(benchmark).map((key) => { +function getTableContent(benchmark, groupUnit = "", col1Title = "Metric") { + const rowKeys = Object.keys(benchmark); + const groups = [ + ...new Set(rowKeys.flatMap((key) => Object.keys(benchmark[key]))), + ]; + console.log(groups); + const header = `| ${col1Title} | ${groups + .map((i) => `${i} ${groupUnit}`) + .join(" | ")} |`; + const separator = `| - | ${groups.map(() => "-").join(" | ")} |`; + const rows = rowKeys.map((key) => { const metric = benchmark[key]; - return `${key} | ${sizes.map((i) => metric[i]).join(" | ")} |`; + return `${key} | ${groups + .map((i) => formatValue(metric[i])) + .join(" | ")} |`; }); return ` -## Benchmark results - -### Rollup published to L1 - ${header} ${separator} ${rows.join("\n")} + `; +} + +// Returns the md content to post +function getPostContent() { + const benchmark = JSON.parse(fs.readFileSync(BENCHMARK_FILE_JSON, "utf-8")); + delete benchmark.timestamp; + + return ` +## Benchmark results + +All benchmarks are run on txs on the \`Benchmarking\` contract on the repository. Each tx consists of a batch call to \`create_note\` and \`increment_balance\`, which guarantees that each tx has a private call, a nested private call, a public call, and a nested public call, as well as an emitted private note, an unencrypted log, and public storage read and write. + +### L2 block published to L1 + +Each column represents the number of txs on an L2 block published to L1. +${getTableContent(pick(benchmark, METRICS_GROUPED_BY_ROLLUP_SIZE), "txs")} + +### L2 chain processing + +Each column represents the number of blocks on the L2 chain where each block has ${BLOCK_SIZE} txs. +${getTableContent(pick(benchmark, METRICS_GROUPED_BY_CHAIN_LENGTH), "blocks")} + +### Circuits stats + +Stats on running time and I/O sizes collected for every circuit run across all benchmarks. 
+${getTableContent( + transpose(pick(benchmark, METRICS_GROUPED_BY_CIRCUIT_NAME)), + "", + "Circuit" +)} ${COMMENT_MARK} `; @@ -61,7 +163,7 @@ async function getExistingComment() { // Function to create or update a comment async function upsertComment(existingCommentId) { try { - const commentContent = getContent(); + const commentContent = getPostContent(); const commentData = { body: commentContent }; const requestMethod = existingCommentId ? "PATCH" : "POST"; diff --git a/yarn-project/Dockerfile b/yarn-project/Dockerfile index 57e3ba2bca3..331795d6dc0 100644 --- a/yarn-project/Dockerfile +++ b/yarn-project/Dockerfile @@ -8,7 +8,7 @@ COPY . . # Generate Noir contract TypeScript artifacts. COPY --from=noir /usr/src/yarn-project/noir-contracts/target /usr/src/yarn-project/noir-contracts/target -# Run yarn build to have the json ABIs available for the types generator, generate types, build again. +# Run yarn build to have the json artifacts available for the types generator, generate types, build again. RUN apk add perl RUN cd /usr/src/yarn-project/noir-contracts && yarn build && ./scripts/types_all.sh && yarn build # Cleanup to reduce final image size. diff --git a/yarn-project/README.md b/yarn-project/README.md index 2561fc4b85a..d5b5201d71b 100644 --- a/yarn-project/README.md +++ b/yarn-project/README.md @@ -40,3 +40,25 @@ To add a new package, make sure to add it to the `build_manifest.json`, to the ` - `tsconfig.json` You may also need to modify the [Dockerfile](yarn-project/yarn-project-base/Dockerfile) to copy your new `package.json` into the container to get CI to pass. + +## Deploying npm packages +`deploy-npm` script handles the releases of npm packages within yarn-project. But the initial release is a manual process: + +1. Ensure relevant folders are copied in by docker in `yarn-project/yarn-project-base/Dockerfile` and `yarn-project/Dockerfile` +2. SSH into the CI +3. 
Run the following: +```sh +cd project +./build-system/scripts/setup_env "$(git rev-parse HEAD)" "" "" "" +source /tmp/.bash_env* +BUILD_SYSTEM_DEBUG=1 +COMMIT_TAG= +``` +4. Follow the [`deploy-npm` script](./deploy_npm.sh). + - Best to run the `deploy_package()` method line by line by manually setting `REPOSITORY` var. + - Extract `VERSION` as the script shows (in the eg it should be 0.8.8) + - Skip the version existing checks like `if [ "$VERSION" == "$PUBLISHED_VERSION" ]` and `if [ "$VERSION" != "$HIGHER_VERSION" ]`. Since this is our first time deploying the package, `PUBLISHED_VERSION` and `HIGHER_VERSION` will be empty and hence these checks would fail. These checks are necessary in the CI for continual releases. + - Locally update the package version in package.json using `jq` as shown in the script + - Do a dry-run + - If dry run succeeds, publish the package! +5. Create a PR by adding your package into the `deploy-npm` script so next release onwards, CI can cut releases for your package. 
\ No newline at end of file diff --git a/yarn-project/acir-simulator/src/client/client_execution_context.ts b/yarn-project/acir-simulator/src/client/client_execution_context.ts index 2627802ef89..6e21f43e9d7 100644 --- a/yarn-project/acir-simulator/src/client/client_execution_context.ts +++ b/yarn-project/acir-simulator/src/client/client_execution_context.ts @@ -11,7 +11,7 @@ import { } from '@aztec/circuits.js'; import { computeUniqueCommitment, siloCommitment } from '@aztec/circuits.js/abis'; import { Grumpkin } from '@aztec/circuits.js/barretenberg'; -import { FunctionAbi } from '@aztec/foundation/abi'; +import { FunctionArtifact } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr, Point } from '@aztec/foundation/fields'; import { createDebugLogger } from '@aztec/foundation/log'; @@ -306,8 +306,8 @@ export class ClientExecutionContext extends ViewDataOracle { `Calling private function ${this.contractAddress}:${functionSelector} from ${this.callContext.storageContractAddress}`, ); - const targetAbi = await this.db.getFunctionABI(targetContractAddress, functionSelector); - const targetFunctionData = FunctionData.fromAbi(targetAbi); + const targetArtifact = await this.db.getFunctionArtifact(targetContractAddress, functionSelector); + const targetFunctionData = FunctionData.fromAbi(targetArtifact); const derivedTxContext = new TxContext( false, @@ -318,7 +318,7 @@ export class ClientExecutionContext extends ViewDataOracle { this.txContext.version, ); - const derivedCallContext = await this.deriveCallContext(targetContractAddress, targetAbi, false, false); + const derivedCallContext = await this.deriveCallContext(targetContractAddress, targetArtifact, false, false); const context = new ClientExecutionContext( targetContractAddress, @@ -336,7 +336,7 @@ export class ClientExecutionContext extends ViewDataOracle { const childExecutionResult = await executePrivateFunction( context, - targetAbi, + targetArtifact, 
targetContractAddress, targetFunctionData, ); @@ -360,14 +360,14 @@ export class ClientExecutionContext extends ViewDataOracle { functionSelector: FunctionSelector, argsHash: Fr, ): Promise { - const targetAbi = await this.db.getFunctionABI(targetContractAddress, functionSelector); - const derivedCallContext = await this.deriveCallContext(targetContractAddress, targetAbi, false, false); + const targetArtifact = await this.db.getFunctionArtifact(targetContractAddress, functionSelector); + const derivedCallContext = await this.deriveCallContext(targetContractAddress, targetArtifact, false, false); const args = this.packedArgsCache.unpack(argsHash); const sideEffectCounter = this.sideEffectCounter.count(); const enqueuedRequest = PublicCallRequest.from({ args, callContext: derivedCallContext, - functionData: FunctionData.fromAbi(targetAbi), + functionData: FunctionData.fromAbi(targetArtifact), contractAddress: targetContractAddress, sideEffectCounter, }); @@ -388,14 +388,14 @@ export class ClientExecutionContext extends ViewDataOracle { /** * Derives the call context for a nested execution. * @param targetContractAddress - The address of the contract being called. - * @param targetAbi - The ABI of the function being called. + * @param targetArtifact - The artifact of the function being called. * @param isDelegateCall - Whether the call is a delegate call. * @param isStaticCall - Whether the call is a static call. * @returns The derived call context. 
*/ private async deriveCallContext( targetContractAddress: AztecAddress, - targetAbi: FunctionAbi, + targetArtifact: FunctionArtifact, isDelegateCall = false, isStaticCall = false, ) { @@ -404,7 +404,7 @@ export class ClientExecutionContext extends ViewDataOracle { this.contractAddress, targetContractAddress, portalContractAddress, - FunctionSelector.fromNameAndParameters(targetAbi.name, targetAbi.parameters), + FunctionSelector.fromNameAndParameters(targetArtifact.name, targetArtifact.parameters), isDelegateCall, isStaticCall, false, diff --git a/yarn-project/acir-simulator/src/client/db_oracle.ts b/yarn-project/acir-simulator/src/client/db_oracle.ts index 3a259db8693..da9233c5453 100644 --- a/yarn-project/acir-simulator/src/client/db_oracle.ts +++ b/yarn-project/acir-simulator/src/client/db_oracle.ts @@ -1,5 +1,5 @@ import { CompleteAddress, GrumpkinPrivateKey, HistoricBlockData, PublicKey } from '@aztec/circuits.js'; -import { FunctionAbi, FunctionDebugMetadata, FunctionSelector } from '@aztec/foundation/abi'; +import { FunctionArtifact, FunctionDebugMetadata, FunctionSelector } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; @@ -8,9 +8,9 @@ import { NoteData } from '../acvm/index.js'; import { CommitmentsDB } from '../public/index.js'; /** - * A function ABI with optional debug metadata + * A function artifact with optional debug metadata */ -export interface FunctionAbiWithDebugMetadata extends FunctionAbi { +export interface FunctionArtifactWithDebugMetadata extends FunctionArtifact { /** * Debug metadata for the function. */ @@ -59,14 +59,17 @@ export interface DBOracle extends CommitmentsDB { getNotes(contractAddress: AztecAddress, storageSlot: Fr): Promise; /** - * Retrieve the ABI information of a specific function within a contract. 
+ * Retrieve the artifact information of a specific function within a contract. * The function is identified by its selector, which is a unique identifier generated from the function signature. * * @param contractAddress - The contract address. * @param selector - The corresponding function selector. - * @returns A Promise that resolves to a FunctionAbi object containing the ABI information of the target function. + * @returns A Promise that resolves to a FunctionArtifact object. */ - getFunctionABI(contractAddress: AztecAddress, selector: FunctionSelector): Promise; + getFunctionArtifact( + contractAddress: AztecAddress, + selector: FunctionSelector, + ): Promise; /** * Retrieves the portal contract address associated with the given contract address. diff --git a/yarn-project/acir-simulator/src/client/private_execution.test.ts b/yarn-project/acir-simulator/src/client/private_execution.test.ts index d861fa8c6bf..ae918a69879 100644 --- a/yarn-project/acir-simulator/src/client/private_execution.test.ts +++ b/yarn-project/acir-simulator/src/client/private_execution.test.ts @@ -24,7 +24,7 @@ import { } from '@aztec/circuits.js/abis'; import { pedersenPlookupCommitInputs } from '@aztec/circuits.js/barretenberg'; import { makeContractDeploymentData } from '@aztec/circuits.js/factories'; -import { FunctionAbi, FunctionSelector, encodeArguments } from '@aztec/foundation/abi'; +import { FunctionArtifact, FunctionSelector, encodeArguments } from '@aztec/foundation/abi'; import { asyncMap } from '@aztec/foundation/async-map'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { toBufferBE } from '@aztec/foundation/bigint-buffer'; @@ -33,14 +33,14 @@ import { Fr, GrumpkinScalar } from '@aztec/foundation/fields'; import { DebugLogger, createDebugLogger } from '@aztec/foundation/log'; import { AppendOnlyTree, Pedersen, StandardTree, newTree } from '@aztec/merkle-tree'; import { - ChildContractAbi, - ImportTestContractAbi, - NonNativeTokenContractAbi, - 
ParentContractAbi, - PendingCommitmentsContractAbi, - PrivateTokenAirdropContractAbi, - StatefulTestContractAbi, - TestContractAbi, + ChildContractArtifact, + ImportTestContractArtifact, + NonNativeTokenContractArtifact, + ParentContractArtifact, + PendingCommitmentsContractArtifact, + PrivateTokenAirdropContractArtifact, + StatefulTestContractArtifact, + TestContractArtifact, } from '@aztec/noir-contracts/artifacts'; import { PackedArguments, TxExecutionRequest } from '@aztec/types'; @@ -49,7 +49,7 @@ import { MockProxy, mock } from 'jest-mock-extended'; import { default as levelup } from 'levelup'; import { type MemDown, default as memdown } from 'memdown'; -import { buildL1ToL2Message, getFunctionAbi } from '../test/utils.js'; +import { buildL1ToL2Message, getFunctionArtifact } from '../test/utils.js'; import { computeSlotForMapping } from '../utils.js'; import { DBOracle } from './db_oracle.js'; import { AcirSimulator } from './simulator.js'; @@ -90,22 +90,22 @@ describe('Private Execution test suite', () => { }; const runSimulator = async ({ - abi, + artifact, args = [], msgSender = AztecAddress.ZERO, contractAddress = defaultContractAddress, portalContractAddress = EthAddress.ZERO, txContext = {}, }: { - abi: FunctionAbi; + artifact: FunctionArtifact; msgSender?: AztecAddress; contractAddress?: AztecAddress; portalContractAddress?: EthAddress; args?: any[]; txContext?: Partial>; }) => { - const packedArguments = await PackedArguments.fromArgs(encodeArguments(abi, args), circuitsWasm); - const functionData = FunctionData.fromAbi(abi); + const packedArguments = await PackedArguments.fromArgs(encodeArguments(artifact, args), circuitsWasm); + const functionData = FunctionData.fromAbi(artifact); const txRequest = TxExecutionRequest.from({ origin: contractAddress, argsHash: packedArguments.hash, @@ -117,7 +117,7 @@ describe('Private Execution test suite', () => { return acirSimulator.run( txRequest, - abi, + artifact, functionData.isConstructor ? 
AztecAddress.ZERO : contractAddress, portalContractAddress, msgSender, @@ -179,10 +179,10 @@ describe('Private Execution test suite', () => { describe('empty constructor', () => { it('should run the empty constructor', async () => { - const abi = getFunctionAbi(TestContractAbi, 'constructor'); + const artifact = getFunctionArtifact(TestContractArtifact, 'constructor'); const contractDeploymentData = makeContractDeploymentData(100); const txContext = { isContractDeploymentTx: true, contractDeploymentData }; - const result = await runSimulator({ abi, txContext }); + const result = await runSimulator({ artifact, txContext }); const emptyCommitments = new Array(MAX_NEW_COMMITMENTS_PER_CALL).fill(Fr.ZERO); expect(result.callStackItem.publicInputs.newCommitments).toEqual(emptyCommitments); @@ -226,16 +226,16 @@ describe('Private Execution test suite', () => { throw new Error(`Unknown address ${address}`); }); - oracle.getFunctionABI.mockImplementation((_, selector) => + oracle.getFunctionArtifact.mockImplementation((_, selector) => Promise.resolve( - PrivateTokenAirdropContractAbi.functions.find(f => + PrivateTokenAirdropContractArtifact.functions.find(f => selector.equals(FunctionSelector.fromNameAndParameters(f.name, f.parameters)), )!, ), ); }); - it('should have an abi for computing note hash and nullifier', async () => { + it('should have an artifact for computing note hash and nullifier', async () => { const storageSlot = Fr.random(); const note = buildNote(60n, owner, storageSlot); @@ -262,9 +262,9 @@ describe('Private Execution test suite', () => { }); it('should a constructor with arguments that inserts notes', async () => { - const abi = getFunctionAbi(PrivateTokenAirdropContractAbi, 'constructor'); + const artifact = getFunctionArtifact(PrivateTokenAirdropContractArtifact, 'constructor'); - const result = await runSimulator({ args: [140, owner], abi }); + const result = await runSimulator({ args: [140, owner], artifact }); 
expect(result.newNotes).toHaveLength(1); const newNote = result.newNotes[0]; @@ -280,9 +280,9 @@ describe('Private Execution test suite', () => { }); it('should run the mint function', async () => { - const abi = getFunctionAbi(PrivateTokenAirdropContractAbi, 'mint'); + const artifact = getFunctionArtifact(PrivateTokenAirdropContractArtifact, 'mint'); - const result = await runSimulator({ args: [140, owner], abi }); + const result = await runSimulator({ args: [140, owner], artifact }); expect(result.newNotes).toHaveLength(1); const newNote = result.newNotes[0]; @@ -299,7 +299,7 @@ describe('Private Execution test suite', () => { it('should run the transfer function', async () => { const amountToTransfer = 100n; - const abi = getFunctionAbi(PrivateTokenAirdropContractAbi, 'transfer'); + const artifact = getFunctionArtifact(PrivateTokenAirdropContractArtifact, 'transfer'); const storageSlot = computeSlotForMapping(new Fr(1n), owner.toField(), circuitsWasm); const recipientStorageSlot = computeSlotForMapping(new Fr(1n), recipient.toField(), circuitsWasm); @@ -313,7 +313,7 @@ describe('Private Execution test suite', () => { await insertLeaves(consumedNotes.map(n => n.siloedNoteHash)); const args = [amountToTransfer, recipient]; - const result = await runSimulator({ args, abi, msgSender: owner }); + const result = await runSimulator({ args, artifact, msgSender: owner }); // The two notes were nullified const newNullifiers = result.callStackItem.publicInputs.newNullifiers.filter(field => !field.equals(Fr.ZERO)); @@ -346,7 +346,7 @@ describe('Private Execution test suite', () => { it('should be able to transfer with dummy notes', async () => { const amountToTransfer = 100n; const balance = 160n; - const abi = getFunctionAbi(PrivateTokenAirdropContractAbi, 'transfer'); + const artifact = getFunctionArtifact(PrivateTokenAirdropContractArtifact, 'transfer'); const storageSlot = computeSlotForMapping(new Fr(1n), owner.toField(), circuitsWasm); @@ -359,7 +359,7 @@ 
describe('Private Execution test suite', () => { await insertLeaves(consumedNotes.map(n => n.siloedNoteHash)); const args = [amountToTransfer, recipient]; - const result = await runSimulator({ args, abi, msgSender: owner }); + const result = await runSimulator({ args, artifact, msgSender: owner }); const newNullifiers = result.callStackItem.publicInputs.newNullifiers.filter(field => !field.equals(Fr.ZERO)); expect(newNullifiers).toEqual(consumedNotes.map(n => n.innerNullifier)); @@ -407,16 +407,16 @@ describe('Private Execution test suite', () => { throw new Error(`Unknown address ${address}`); }); - oracle.getFunctionABI.mockImplementation((_, selector) => + oracle.getFunctionArtifact.mockImplementation((_, selector) => Promise.resolve( - StatefulTestContractAbi.functions.find(f => + StatefulTestContractArtifact.functions.find(f => selector.equals(FunctionSelector.fromNameAndParameters(f.name, f.parameters)), )!, ), ); }); - it('should have an abi for computing note hash and nullifier', async () => { + it('should have an artifact for computing note hash and nullifier', async () => { const storageSlot = Fr.random(); const note = buildNote(60n, owner, storageSlot); @@ -443,9 +443,9 @@ describe('Private Execution test suite', () => { }); it('should a constructor with arguments that inserts notes', async () => { - const abi = getFunctionAbi(StatefulTestContractAbi, 'constructor'); + const artifact = getFunctionArtifact(StatefulTestContractArtifact, 'constructor'); - const result = await runSimulator({ args: [owner, 140], abi }); + const result = await runSimulator({ args: [owner, 140], artifact }); expect(result.newNotes).toHaveLength(1); const newNote = result.newNotes[0]; @@ -461,9 +461,9 @@ describe('Private Execution test suite', () => { }); it('should run the create_note function', async () => { - const abi = getFunctionAbi(StatefulTestContractAbi, 'create_note'); + const artifact = getFunctionArtifact(StatefulTestContractArtifact, 'create_note'); - const result 
= await runSimulator({ args: [owner, 140], abi }); + const result = await runSimulator({ args: [owner, 140], artifact }); expect(result.newNotes).toHaveLength(1); const newNote = result.newNotes[0]; @@ -480,7 +480,7 @@ describe('Private Execution test suite', () => { it('should run the destroy_and_create function', async () => { const amountToTransfer = 100n; - const abi = getFunctionAbi(StatefulTestContractAbi, 'destroy_and_create'); + const artifact = getFunctionArtifact(StatefulTestContractArtifact, 'destroy_and_create'); const storageSlot = computeSlotForMapping(new Fr(1n), owner.toField(), circuitsWasm); const recipientStorageSlot = computeSlotForMapping(new Fr(1n), recipient.toField(), circuitsWasm); @@ -494,7 +494,7 @@ describe('Private Execution test suite', () => { await insertLeaves(consumedNotes.map(n => n.siloedNoteHash)); const args = [recipient, amountToTransfer]; - const result = await runSimulator({ args, abi, msgSender: owner }); + const result = await runSimulator({ args, artifact, msgSender: owner }); // The two notes were nullified const newNullifiers = result.callStackItem.publicInputs.newNullifiers.filter(field => !field.equals(Fr.ZERO)); @@ -527,7 +527,7 @@ describe('Private Execution test suite', () => { it('should be able to destroy_and_create with dummy notes', async () => { const amountToTransfer = 100n; const balance = 160n; - const abi = getFunctionAbi(StatefulTestContractAbi, 'destroy_and_create'); + const artifact = getFunctionArtifact(StatefulTestContractArtifact, 'destroy_and_create'); const storageSlot = computeSlotForMapping(new Fr(1n), owner.toField(), circuitsWasm); @@ -540,7 +540,7 @@ describe('Private Execution test suite', () => { await insertLeaves(consumedNotes.map(n => n.siloedNoteHash)); const args = [recipient, amountToTransfer]; - const result = await runSimulator({ args, abi, msgSender: owner }); + const result = await runSimulator({ args, artifact, msgSender: owner }); const newNullifiers = 
result.callStackItem.publicInputs.newNullifiers.filter(field => !field.equals(Fr.ZERO)); expect(newNullifiers).toEqual(consumedNotes.map(n => n.innerNullifier)); @@ -557,30 +557,30 @@ describe('Private Execution test suite', () => { it('child function should be callable', async () => { const initialValue = 100n; - const abi = getFunctionAbi(ChildContractAbi, 'value'); - const result = await runSimulator({ args: [initialValue], abi }); + const artifact = getFunctionArtifact(ChildContractArtifact, 'value'); + const result = await runSimulator({ args: [initialValue], artifact }); expect(result.callStackItem.publicInputs.returnValues[0]).toEqual(new Fr(initialValue + privateIncrement)); }); it('parent should call child', async () => { - const childAbi = getFunctionAbi(ChildContractAbi, 'value'); - const parentAbi = getFunctionAbi(ParentContractAbi, 'entryPoint'); + const childArtifact = getFunctionArtifact(ChildContractArtifact, 'value'); + const parentArtifact = getFunctionArtifact(ParentContractArtifact, 'entryPoint'); const parentAddress = AztecAddress.random(); const childAddress = AztecAddress.random(); - const childSelector = FunctionSelector.fromNameAndParameters(childAbi.name, childAbi.parameters); + const childSelector = FunctionSelector.fromNameAndParameters(childArtifact.name, childArtifact.parameters); - oracle.getFunctionABI.mockImplementation(() => Promise.resolve(childAbi)); + oracle.getFunctionArtifact.mockImplementation(() => Promise.resolve(childArtifact)); oracle.getPortalContractAddress.mockImplementation(() => Promise.resolve(EthAddress.ZERO)); logger(`Parent deployed at ${parentAddress.toShortString()}`); logger(`Calling child function ${childSelector.toString()} at ${childAddress.toShortString()}`); const args = [Fr.fromBuffer(childAddress.toBuffer()), Fr.fromBuffer(childSelector.toBuffer())]; - const result = await runSimulator({ args, abi: parentAbi }); + const result = await runSimulator({ args, artifact: parentArtifact }); 
expect(result.callStackItem.publicInputs.returnValues[0]).toEqual(new Fr(privateIncrement)); - expect(oracle.getFunctionABI.mock.calls[0]).toEqual([childAddress, childSelector]); + expect(oracle.getFunctionArtifact.mock.calls[0]).toEqual([childAddress, childSelector]); expect(oracle.getPortalContractAddress.mock.calls[0]).toEqual([childAddress]); expect(result.nestedExecutions).toHaveLength(1); expect(result.nestedExecutions[0].callStackItem.publicInputs.returnValues[0]).toEqual(new Fr(privateIncrement)); @@ -595,42 +595,42 @@ describe('Private Execution test suite', () => { describe('nested calls through autogenerated interface', () => { let args: any[]; let argsHash: Fr; - let testCodeGenAbi: FunctionAbi; + let testCodeGenArtifact: FunctionArtifact; beforeAll(async () => { // These args should match the ones hardcoded in importer contract const dummyNote = { amount: 1, secretHash: 2 }; const deepStruct = { aField: 1, aBool: true, aNote: dummyNote, manyNotes: [dummyNote, dummyNote, dummyNote] }; args = [1, true, 1, [1, 2], dummyNote, deepStruct]; - testCodeGenAbi = TestContractAbi.functions.find(f => f.name === 'testCodeGen')!; - const serializedArgs = encodeArguments(testCodeGenAbi, args); + testCodeGenArtifact = TestContractArtifact.functions.find(f => f.name === 'testCodeGen')!; + const serializedArgs = encodeArguments(testCodeGenArtifact, args); argsHash = await computeVarArgsHash(await CircuitsWasm.get(), serializedArgs); }); it('test function should be directly callable', async () => { logger(`Calling testCodeGen function`); - const result = await runSimulator({ args, abi: testCodeGenAbi }); + const result = await runSimulator({ args, artifact: testCodeGenArtifact }); expect(result.callStackItem.publicInputs.returnValues[0]).toEqual(argsHash); }); it('test function should be callable through autogenerated interface', async () => { const testAddress = AztecAddress.random(); - const parentAbi = ImportTestContractAbi.functions.find(f => f.name === 'main')!; + 
const parentArtifact = ImportTestContractArtifact.functions.find(f => f.name === 'main')!; const testCodeGenSelector = FunctionSelector.fromNameAndParameters( - testCodeGenAbi.name, - testCodeGenAbi.parameters, + testCodeGenArtifact.name, + testCodeGenArtifact.parameters, ); - oracle.getFunctionABI.mockResolvedValue(testCodeGenAbi); + oracle.getFunctionArtifact.mockResolvedValue(testCodeGenArtifact); oracle.getPortalContractAddress.mockResolvedValue(EthAddress.ZERO); logger(`Calling importer main function`); const args = [testAddress]; - const result = await runSimulator({ args, abi: parentAbi }); + const result = await runSimulator({ args, artifact: parentArtifact }); expect(result.callStackItem.publicInputs.returnValues[0]).toEqual(argsHash); - expect(oracle.getFunctionABI.mock.calls[0]).toEqual([testAddress, testCodeGenSelector]); + expect(oracle.getFunctionArtifact.mock.calls[0]).toEqual([testAddress, testCodeGenSelector]); expect(oracle.getPortalContractAddress.mock.calls[0]).toEqual([testAddress]); expect(result.nestedExecutions).toHaveLength(1); expect(result.nestedExecutions[0].callStackItem.publicInputs.returnValues[0]).toEqual(argsHash); @@ -649,7 +649,7 @@ describe('Private Execution test suite', () => { it('Should be able to consume a dummy cross chain message', async () => { const bridgedAmount = 100n; - const abi = getFunctionAbi(NonNativeTokenContractAbi, 'mint'); + const artifact = getFunctionArtifact(NonNativeTokenContractArtifact, 'mint'); const secret = new Fr(1n); const canceller = EthAddress.random(); @@ -674,7 +674,7 @@ describe('Private Execution test suite', () => { }); const args = [bridgedAmount, recipient, messageKey, secret, canceller.toField()]; - const result = await runSimulator({ contractAddress, abi, args }); + const result = await runSimulator({ contractAddress, artifact, args }); // Check a nullifier has been inserted const newNullifiers = result.callStackItem.publicInputs.newNullifiers.filter(field => !field.equals(Fr.ZERO)); @@ 
-683,7 +683,7 @@ describe('Private Execution test suite', () => { it('Should be able to consume a dummy public to private message', async () => { const amount = 100n; - const abi = getFunctionAbi(NonNativeTokenContractAbi, 'redeemShield'); + const artifact = getFunctionArtifact(NonNativeTokenContractArtifact, 'redeemShield'); const wasm = await CircuitsWasm.get(); const secret = new Fr(1n); @@ -706,7 +706,7 @@ describe('Private Execution test suite', () => { ]); const result = await runSimulator({ - abi, + artifact, args: [amount, secret, recipient], }); @@ -723,26 +723,29 @@ describe('Private Execution test suite', () => { describe('enqueued calls', () => { it.each([false, true])('parent should enqueue call to child (internal %p)', async isInternal => { - const parentAbi = ParentContractAbi.functions.find(f => f.name === 'enqueueCallToChild')!; - const childContractAbi = ParentContractAbi.functions[0]; + const parentArtifact = ParentContractArtifact.functions.find(f => f.name === 'enqueueCallToChild')!; + const childContractArtifact = ParentContractArtifact.functions[0]; const childAddress = AztecAddress.random(); const childPortalContractAddress = EthAddress.random(); - const childSelector = FunctionSelector.fromNameAndParameters(childContractAbi.name, childContractAbi.parameters); + const childSelector = FunctionSelector.fromNameAndParameters( + childContractArtifact.name, + childContractArtifact.parameters, + ); const parentAddress = AztecAddress.random(); oracle.getPortalContractAddress.mockImplementation(() => Promise.resolve(childPortalContractAddress)); - oracle.getFunctionABI.mockImplementation(() => Promise.resolve({ ...childContractAbi, isInternal })); + oracle.getFunctionArtifact.mockImplementation(() => Promise.resolve({ ...childContractArtifact, isInternal })); const args = [Fr.fromBuffer(childAddress.toBuffer()), childSelector.toField(), 42n]; const result = await runSimulator({ msgSender: parentAddress, contractAddress: parentAddress, - abi: 
parentAbi, + artifact: parentArtifact, args, }); - // Alter function data (abi) to match the manipulated oracle - const functionData = FunctionData.fromAbi(childContractAbi); + // Alter function data to match the manipulated oracle + const functionData = FunctionData.fromAbi(childContractArtifact); functionData.isInternal = isInternal; const publicCallRequest = PublicCallRequest.from({ @@ -781,9 +784,9 @@ describe('Private Execution test suite', () => { }); beforeEach(() => { - oracle.getFunctionABI.mockImplementation((_, selector) => + oracle.getFunctionArtifact.mockImplementation((_, selector) => Promise.resolve( - PendingCommitmentsContractAbi.functions.find(f => + PendingCommitmentsContractArtifact.functions.find(f => selector.equals(FunctionSelector.fromNameAndParameters(f.name, f.parameters)), )!, ), @@ -796,14 +799,14 @@ describe('Private Execution test suite', () => { const amountToTransfer = 100n; const contractAddress = AztecAddress.random(); - const abi = PendingCommitmentsContractAbi.functions.find( + const artifact = PendingCommitmentsContractArtifact.functions.find( f => f.name === 'test_insert_then_get_then_nullify_flat', )!; const args = [amountToTransfer, owner]; const result = await runSimulator({ args: args, - abi: abi, + artifact: artifact, contractAddress, }); @@ -839,19 +842,26 @@ describe('Private Execution test suite', () => { const amountToTransfer = 100n; const contractAddress = AztecAddress.random(); - const abi = PendingCommitmentsContractAbi.functions.find( + const artifact = PendingCommitmentsContractArtifact.functions.find( f => f.name === 'test_insert_then_get_then_nullify_all_in_nested_calls', )!; - const insertAbi = PendingCommitmentsContractAbi.functions.find(f => f.name === 'insert_note')!; - const getThenNullifyAbi = PendingCommitmentsContractAbi.functions.find(f => f.name === 'get_then_nullify_note')!; - const getZeroAbi = PendingCommitmentsContractAbi.functions.find(f => f.name === 'get_note_zero_balance')!; + const 
insertArtifact = PendingCommitmentsContractArtifact.functions.find(f => f.name === 'insert_note')!; + const getThenNullifyArtifact = PendingCommitmentsContractArtifact.functions.find( + f => f.name === 'get_then_nullify_note', + )!; + const getZeroArtifact = PendingCommitmentsContractArtifact.functions.find( + f => f.name === 'get_note_zero_balance', + )!; - const insertFnSelector = FunctionSelector.fromNameAndParameters(insertAbi.name, insertAbi.parameters); + const insertFnSelector = FunctionSelector.fromNameAndParameters(insertArtifact.name, insertArtifact.parameters); const getThenNullifyFnSelector = FunctionSelector.fromNameAndParameters( - getThenNullifyAbi.name, - getThenNullifyAbi.parameters, + getThenNullifyArtifact.name, + getThenNullifyArtifact.parameters, + ); + const getZeroFnSelector = FunctionSelector.fromNameAndParameters( + getZeroArtifact.name, + getZeroArtifact.parameters, ); - const getZeroFnSelector = FunctionSelector.fromNameAndParameters(getZeroAbi.name, getZeroAbi.parameters); oracle.getPortalContractAddress.mockImplementation(() => Promise.resolve(EthAddress.ZERO)); @@ -864,7 +874,7 @@ describe('Private Execution test suite', () => { ]; const result = await runSimulator({ args: args, - abi: abi, + artifact: artifact, contractAddress: contractAddress, }); @@ -910,12 +920,14 @@ describe('Private Execution test suite', () => { const amountToTransfer = 100n; const contractAddress = AztecAddress.random(); - const abi = PendingCommitmentsContractAbi.functions.find(f => f.name === 'test_bad_get_then_insert_flat')!; + const artifact = PendingCommitmentsContractArtifact.functions.find( + f => f.name === 'test_bad_get_then_insert_flat', + )!; const args = [amountToTransfer, owner]; const result = await runSimulator({ args: args, - abi: abi, + artifact: artifact, contractAddress, }); @@ -948,9 +960,9 @@ describe('Private Execution test suite', () => { describe('get public key', () => { it('gets the public key for an address', async () => { - // Tweak 
the contract ABI so we can extract return values - const abi = getFunctionAbi(TestContractAbi, 'getPublicKey'); - abi.returnTypes = [{ kind: 'array', length: 2, type: { kind: 'field' } }]; + // Tweak the contract artifact so we can extract return values + const artifact = getFunctionArtifact(TestContractArtifact, 'getPublicKey'); + artifact.returnTypes = [{ kind: 'array', length: 2, type: { kind: 'field' } }]; // Generate a partial address, pubkey, and resulting address const completeAddress = await CompleteAddress.random(); @@ -958,7 +970,7 @@ describe('Private Execution test suite', () => { const pubKey = completeAddress.publicKey; oracle.getCompleteAddress.mockResolvedValue(completeAddress); - const result = await runSimulator({ abi, args }); + const result = await runSimulator({ artifact, args }); expect(result.returnValues).toEqual([pubKey.x.value, pubKey.y.value]); }); }); @@ -968,39 +980,39 @@ describe('Private Execution test suite', () => { const portalContractAddress = EthAddress.random(); const aztecAddressToQuery = AztecAddress.random(); - // Tweak the contract ABI so we can extract return values - const abi = getFunctionAbi(TestContractAbi, 'getPortalContractAddress'); - abi.returnTypes = [{ kind: 'field' }]; + // Tweak the contract artifact so we can extract return values + const artifact = getFunctionArtifact(TestContractArtifact, 'getPortalContractAddress'); + artifact.returnTypes = [{ kind: 'field' }]; const args = [aztecAddressToQuery.toField()]; // Overwrite the oracle return value oracle.getPortalContractAddress.mockResolvedValue(portalContractAddress); - const result = await runSimulator({ abi, args }); + const result = await runSimulator({ artifact, args }); expect(result.returnValues).toEqual(portalContractAddress.toField().value); }); it('this_address should return the current context address', async () => { const contractAddress = AztecAddress.random(); - // Tweak the contract ABI so we can extract return values - const abi = 
getFunctionAbi(TestContractAbi, 'getThisAddress'); - abi.returnTypes = [{ kind: 'field' }]; + // Tweak the contract artifact so we can extract return values + const artifact = getFunctionArtifact(TestContractArtifact, 'getThisAddress'); + artifact.returnTypes = [{ kind: 'field' }]; // Overwrite the oracle return value - const result = await runSimulator({ abi, args: [], contractAddress }); + const result = await runSimulator({ artifact, args: [], contractAddress }); expect(result.returnValues).toEqual(contractAddress.toField().value); }); it("this_portal_address should return the current context's portal address", async () => { const portalContractAddress = EthAddress.random(); - // Tweak the contract ABI so we can extract return values - const abi = getFunctionAbi(TestContractAbi, 'getThisPortalAddress'); - abi.returnTypes = [{ kind: 'field' }]; + // Tweak the contract artifact so we can extract return values + const artifact = getFunctionArtifact(TestContractArtifact, 'getThisPortalAddress'); + artifact.returnTypes = [{ kind: 'field' }]; // Overwrite the oracle return value - const result = await runSimulator({ abi, args: [], portalContractAddress }); + const result = await runSimulator({ artifact, args: [], portalContractAddress }); expect(result.returnValues).toEqual(portalContractAddress.toField().value); }); }); diff --git a/yarn-project/acir-simulator/src/client/private_execution.ts b/yarn-project/acir-simulator/src/client/private_execution.ts index 3de7e625b5f..c6e8483641a 100644 --- a/yarn-project/acir-simulator/src/client/private_execution.ts +++ b/yarn-project/acir-simulator/src/client/private_execution.ts @@ -9,7 +9,7 @@ import { extractPrivateCircuitPublicInputs } from '../acvm/deserialize.js'; import { Oracle, acvm, extractCallStack } from '../acvm/index.js'; import { ExecutionError } from '../common/errors.js'; import { ClientExecutionContext } from './client_execution_context.js'; -import { FunctionAbiWithDebugMetadata } from './db_oracle.js'; 
+import { FunctionArtifactWithDebugMetadata } from './db_oracle.js'; import { ExecutionResult } from './execution_result.js'; import { AcirSimulator } from './simulator.js'; @@ -18,7 +18,7 @@ import { AcirSimulator } from './simulator.js'; */ export async function executePrivateFunction( context: ClientExecutionContext, - abi: FunctionAbiWithDebugMetadata, + artifact: FunctionArtifactWithDebugMetadata, contractAddress: AztecAddress, functionData: FunctionData, log = createDebugLogger('aztec:simulator:secret_execution'), @@ -26,7 +26,7 @@ export async function executePrivateFunction( const functionSelector = functionData.selector; log(`Executing external function ${contractAddress}:${functionSelector}`); - const acir = Buffer.from(abi.bytecode, 'base64'); + const acir = Buffer.from(artifact.bytecode, 'base64'); const initialWitness = context.getInitialWitness(); const acvmCallback = new Oracle(context); const { partialWitness } = await acvm(await AcirSimulator.getSolver(), acir, initialWitness, acvmCallback).catch( @@ -37,7 +37,7 @@ export async function executePrivateFunction( contractAddress, functionSelector, }, - extractCallStack(err, abi.debug), + extractCallStack(err, artifact.debug), { cause: err }, ); }, @@ -54,7 +54,7 @@ export async function executePrivateFunction( publicInputs.unencryptedLogPreimagesLength = new Fr(unencryptedLogs.getSerializedLength()); const callStackItem = new PrivateCallStackItem(contractAddress, functionData, publicInputs, false); - const returnValues = decodeReturnValues(abi, publicInputs.returnValues); + const returnValues = decodeReturnValues(artifact, publicInputs.returnValues); const readRequestPartialWitnesses = context.getReadRequestPartialWitnesses(publicInputs.readRequests); const newNotes = context.getNewNotes(); const nestedExecutions = context.getNestedExecutions(); @@ -69,7 +69,7 @@ export async function executePrivateFunction( returnValues, readRequestPartialWitnesses, newNotes, - vk: Buffer.from(abi.verificationKey!, 
'hex'), + vk: Buffer.from(artifact.verificationKey!, 'hex'), nestedExecutions, enqueuedPublicFunctionCalls, encryptedLogs, diff --git a/yarn-project/acir-simulator/src/client/simulator.ts b/yarn-project/acir-simulator/src/client/simulator.ts index 2679f15e02a..46521e35c60 100644 --- a/yarn-project/acir-simulator/src/client/simulator.ts +++ b/yarn-project/acir-simulator/src/client/simulator.ts @@ -13,7 +13,7 @@ import { createSimulationError } from '../common/errors.js'; import { SideEffectCounter } from '../common/index.js'; import { PackedArgsCache } from '../common/packed_args_cache.js'; import { ClientExecutionContext } from './client_execution_context.js'; -import { DBOracle, FunctionAbiWithDebugMetadata } from './db_oracle.js'; +import { DBOracle, FunctionArtifactWithDebugMetadata } from './db_oracle.js'; import { ExecutionNoteCache } from './execution_note_cache.js'; import { ExecutionResult } from './execution_result.js'; import { executePrivateFunction } from './private_execution.js'; @@ -52,7 +52,7 @@ export class AcirSimulator { /** * Runs a private function. * @param request - The transaction request. - * @param entryPointABI - The ABI of the entry point function. + * @param entryPointArtifact - The artifact of the entry point function. * @param contractAddress - The address of the contract (should match request.origin) * @param portalContractAddress - The address of the portal contract. * @param msgSender - The address calling the function. This can be replaced to simulate a call from another contract or a specific account. 
@@ -60,13 +60,13 @@ export class AcirSimulator { */ public async run( request: TxExecutionRequest, - entryPointABI: FunctionAbiWithDebugMetadata, + entryPointArtifact: FunctionArtifactWithDebugMetadata, contractAddress: AztecAddress, portalContractAddress: EthAddress, msgSender = AztecAddress.ZERO, ): Promise { - if (entryPointABI.functionType !== FunctionType.SECRET) { - throw new Error(`Cannot run ${entryPointABI.functionType} function as secret`); + if (entryPointArtifact.functionType !== FunctionType.SECRET) { + throw new Error(`Cannot run ${entryPointArtifact.functionType} function as secret`); } if (request.origin !== contractAddress) { @@ -82,7 +82,7 @@ export class AcirSimulator { msgSender, contractAddress, portalContractAddress, - FunctionSelector.fromNameAndParameters(entryPointABI.name, entryPointABI.parameters), + FunctionSelector.fromNameAndParameters(entryPointArtifact.name, entryPointArtifact.parameters), false, false, request.functionData.isConstructor, @@ -104,7 +104,7 @@ export class AcirSimulator { try { const executionResult = await executePrivateFunction( context, - entryPointABI, + entryPointArtifact, contractAddress, request.functionData, ); @@ -117,18 +117,18 @@ export class AcirSimulator { /** * Runs an unconstrained function. * @param request - The transaction request. - * @param entryPointABI - The ABI of the entry point function. + * @param entryPointArtifact - The artifact of the entry point function. * @param contractAddress - The address of the contract. * @param aztecNode - The AztecNode instance. 
*/ public async runUnconstrained( request: FunctionCall, - entryPointABI: FunctionAbiWithDebugMetadata, + entryPointArtifact: FunctionArtifactWithDebugMetadata, contractAddress: AztecAddress, aztecNode?: AztecNode, ) { - if (entryPointABI.functionType !== FunctionType.UNCONSTRAINED) { - throw new Error(`Cannot run ${entryPointABI.functionType} function as constrained`); + if (entryPointArtifact.functionType !== FunctionType.UNCONSTRAINED) { + throw new Error(`Cannot run ${entryPointArtifact.functionType} function as constrained`); } const historicBlockData = await this.db.getHistoricBlockData(); @@ -137,7 +137,7 @@ export class AcirSimulator { try { return await executeUnconstrainedFunction( context, - entryPointABI, + entryPointArtifact, contractAddress, request.functionData, request.args, @@ -161,38 +161,38 @@ export class AcirSimulator { storageSlot: Fr, notePreimage: Fr[], ) { - let abi: FunctionAbiWithDebugMetadata | undefined = undefined; + let artifact: FunctionArtifactWithDebugMetadata | undefined = undefined; // Brute force for (let i = notePreimage.length; i < MAX_NOTE_FIELDS_LENGTH; i++) { const signature = `compute_note_hash_and_nullifier(Field,Field,Field,[Field;${i}])`; const selector = FunctionSelector.fromSignature(signature); try { - abi = await this.db.getFunctionABI(contractAddress, selector); - if (abi !== undefined) break; + artifact = await this.db.getFunctionArtifact(contractAddress, selector); + if (artifact !== undefined) break; } catch (e) { // ignore } } - if (abi == undefined) { + if (artifact == undefined) { throw new Error( `Mandatory implementation of "compute_note_hash_and_nullifier" missing in noir contract ${contractAddress.toString()}.`, ); } - const preimageLen = (abi.parameters[3].type as ArrayType).length; + const preimageLen = (artifact.parameters[3].type as ArrayType).length; const extendedPreimage = notePreimage.concat(Array(preimageLen - notePreimage.length).fill(Fr.ZERO)); const execRequest: FunctionCall = { to: 
AztecAddress.ZERO, functionData: FunctionData.empty(), - args: encodeArguments(abi, [contractAddress, nonce, storageSlot, extendedPreimage]), + args: encodeArguments(artifact, [contractAddress, nonce, storageSlot, extendedPreimage]), }; const [innerNoteHash, siloedNoteHash, uniqueSiloedNoteHash, innerNullifier] = (await this.runUnconstrained( execRequest, - abi, + artifact, AztecAddress.ZERO, )) as bigint[]; @@ -209,7 +209,6 @@ export class AcirSimulator { * @param contractAddress - The address of the contract. * @param storageSlot - The storage slot. * @param notePreimage - The note preimage. - * @param abi - The ABI of the function `compute_note_hash`. * @returns The note hash. */ public async computeInnerNoteHash(contractAddress: AztecAddress, storageSlot: Fr, notePreimage: Fr[]) { @@ -228,7 +227,6 @@ export class AcirSimulator { * @param nonce - The nonce of the note hash. * @param storageSlot - The storage slot. * @param notePreimage - The note preimage. - * @param abi - The ABI of the function `compute_note_hash`. * @returns The note hash. */ public async computeUniqueSiloedNoteHash( @@ -252,7 +250,6 @@ export class AcirSimulator { * @param nonce - The nonce of the note hash. * @param storageSlot - The storage slot. * @param notePreimage - The note preimage. - * @param abi - The ABI of the function `compute_note_hash`. * @returns The note hash. */ public async computeSiloedNoteHash(contractAddress: AztecAddress, nonce: Fr, storageSlot: Fr, notePreimage: Fr[]) { @@ -271,7 +268,6 @@ export class AcirSimulator { * @param nonce - The nonce of the unique note hash. * @param storageSlot - The storage slot. * @param notePreimage - The note preimage. - * @param abi - The ABI of the function `compute_note_hash`. * @returns The note hash. 
*/ public async computeInnerNullifier(contractAddress: AztecAddress, nonce: Fr, storageSlot: Fr, notePreimage: Fr[]) { diff --git a/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts b/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts index 2cb8fa654ac..34a4a9ced6e 100644 --- a/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts +++ b/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts @@ -2,7 +2,7 @@ import { CompleteAddress, FunctionData, HistoricBlockData } from '@aztec/circuit import { FunctionSelector, encodeArguments } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr, GrumpkinScalar } from '@aztec/foundation/fields'; -import { StatefulTestContractAbi } from '@aztec/noir-contracts/artifacts'; +import { StatefulTestContractArtifact } from '@aztec/noir-contracts/artifacts'; import { FunctionCall } from '@aztec/types'; import { mock } from 'jest-mock-extended'; @@ -40,7 +40,7 @@ describe('Unconstrained Execution test suite', () => { it('should run the summed_values function', async () => { const contractAddress = AztecAddress.random(); - const abi = StatefulTestContractAbi.functions.find(f => f.name === 'summed_values')!; + const artifact = StatefulTestContractArtifact.functions.find(f => f.name === 'summed_values')!; const preimages = [...Array(5).fill(buildNote(1n, owner)), ...Array(2).fill(buildNote(2n, owner))]; @@ -61,10 +61,10 @@ describe('Unconstrained Execution test suite', () => { const execRequest: FunctionCall = { to: contractAddress, functionData: new FunctionData(FunctionSelector.empty(), false, true, true), - args: encodeArguments(abi, [owner]), + args: encodeArguments(artifact, [owner]), }; - const result = await acirSimulator.runUnconstrained(execRequest, abi, AztecAddress.random()); + const result = await acirSimulator.runUnconstrained(execRequest, artifact, AztecAddress.random()); expect(result).toEqual(9n); }, 
30_000); diff --git a/yarn-project/acir-simulator/src/client/unconstrained_execution.ts b/yarn-project/acir-simulator/src/client/unconstrained_execution.ts index 85504664c83..d197ec386fb 100644 --- a/yarn-project/acir-simulator/src/client/unconstrained_execution.ts +++ b/yarn-project/acir-simulator/src/client/unconstrained_execution.ts @@ -8,7 +8,7 @@ import { extractReturnWitness } from '../acvm/deserialize.js'; import { ACVMField, Oracle, acvm, extractCallStack, fromACVMField, toACVMWitness } from '../acvm/index.js'; import { ExecutionError } from '../common/errors.js'; import { AcirSimulator } from '../index.js'; -import { FunctionAbiWithDebugMetadata } from './db_oracle.js'; +import { FunctionArtifactWithDebugMetadata } from './db_oracle.js'; import { ViewDataOracle } from './view_data_oracle.js'; /** @@ -16,7 +16,7 @@ import { ViewDataOracle } from './view_data_oracle.js'; */ export async function executeUnconstrainedFunction( oracle: ViewDataOracle, - abi: FunctionAbiWithDebugMetadata, + artifact: FunctionArtifactWithDebugMetadata, contractAddress: AztecAddress, functionData: FunctionData, args: Fr[], @@ -25,7 +25,7 @@ export async function executeUnconstrainedFunction( const functionSelector = functionData.selector; log(`Executing unconstrained function ${contractAddress}:${functionSelector}`); - const acir = Buffer.from(abi.bytecode, 'base64'); + const acir = Buffer.from(artifact.bytecode, 'base64'); const initialWitness = toACVMWitness(1, args); const { partialWitness } = await acvm( await AcirSimulator.getSolver(), @@ -39,11 +39,11 @@ export async function executeUnconstrainedFunction( contractAddress, functionSelector, }, - extractCallStack(err, abi.debug), + extractCallStack(err, artifact.debug), { cause: err }, ); }); const returnValues: ACVMField[] = extractReturnWitness(acir, partialWitness); - return decodeReturnValues(abi, returnValues.map(fromACVMField)); + return decodeReturnValues(artifact, returnValues.map(fromACVMField)); } diff --git 
a/yarn-project/acir-simulator/src/public/index.test.ts b/yarn-project/acir-simulator/src/public/index.test.ts index 24fb47cad49..c8bb5c90924 100644 --- a/yarn-project/acir-simulator/src/public/index.test.ts +++ b/yarn-project/acir-simulator/src/public/index.test.ts @@ -8,17 +8,17 @@ import { L1_TO_L2_MSG_TREE_HEIGHT, } from '@aztec/circuits.js'; import { pedersenPlookupCommitInputs } from '@aztec/circuits.js/barretenberg'; -import { FunctionAbi, FunctionSelector, encodeArguments } from '@aztec/foundation/abi'; +import { FunctionArtifact, FunctionSelector, encodeArguments } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; import { toBigInt } from '@aztec/foundation/serialize'; import { - ChildContractAbi, - NonNativeTokenContractAbi, - ParentContractAbi, - PublicTokenContractAbi, - TestContractAbi, + ChildContractArtifact, + NonNativeTokenContractArtifact, + ParentContractArtifact, + PublicTokenContractArtifact, + TestContractArtifact, } from '@aztec/noir-contracts/artifacts'; import { MockProxy, mock } from 'jest-mock-extended'; @@ -63,9 +63,9 @@ describe('ACIR public execution simulator', () => { describe('mint', () => { it('should run the mint function', async () => { const contractAddress = AztecAddress.random(); - const mintAbi = PublicTokenContractAbi.functions.find(f => f.name === 'mint')!; - const functionData = FunctionData.fromAbi(mintAbi); - const args = encodeArguments(mintAbi, [140, recipient]); + const mintArtifact = PublicTokenContractArtifact.functions.find(f => f.name === 'mint')!; + const functionData = FunctionData.fromAbi(mintArtifact); + const args = encodeArguments(mintArtifact, [140, recipient]); const callContext = CallContext.from({ msgSender: AztecAddress.random(), @@ -77,7 +77,7 @@ describe('ACIR public execution simulator', () => { isStaticCall: false, }); - 
publicContracts.getBytecode.mockResolvedValue(Buffer.from(mintAbi.bytecode, 'base64')); + publicContracts.getBytecode.mockResolvedValue(Buffer.from(mintArtifact.bytecode, 'base64')); // Mock the old value for the recipient balance to be 20 const previousBalance = new Fr(20n); @@ -100,7 +100,7 @@ describe('ACIR public execution simulator', () => { describe('transfer', () => { let contractAddress: AztecAddress; - let abi: FunctionAbi; + let artifact: FunctionArtifact; let functionData: FunctionData; let args: Fr[]; let sender: AztecAddress; @@ -111,9 +111,9 @@ describe('ACIR public execution simulator', () => { beforeEach(() => { contractAddress = AztecAddress.random(); - abi = PublicTokenContractAbi.functions.find(f => f.name === 'transfer')!; + artifact = PublicTokenContractArtifact.functions.find(f => f.name === 'transfer')!; functionData = new FunctionData(FunctionSelector.empty(), false, false, false); - args = encodeArguments(abi, [140, recipient]); + args = encodeArguments(artifact, [140, recipient]); sender = AztecAddress.random(); callContext = CallContext.from({ @@ -129,7 +129,7 @@ describe('ACIR public execution simulator', () => { recipientStorageSlot = computeSlotForMapping(new Fr(1n), recipient.toField(), circuitsWasm); senderStorageSlot = computeSlotForMapping(new Fr(1n), Fr.fromBuffer(sender.toBuffer()), circuitsWasm); - publicContracts.getBytecode.mockResolvedValue(Buffer.from(abi.bytecode, 'base64')); + publicContracts.getBytecode.mockResolvedValue(Buffer.from(artifact.bytecode, 'base64')); execution = { contractAddress, functionData, args, callContext }; }); @@ -202,14 +202,14 @@ describe('ACIR public execution simulator', () => { 'calls the public entry point in the parent', async isInternal => { const parentContractAddress = AztecAddress.random(); - const parentEntryPointFn = ParentContractAbi.functions.find(f => f.name === 'pubEntryPoint')!; + const parentEntryPointFn = ParentContractArtifact.functions.find(f => f.name === 'pubEntryPoint')!; 
const parentEntryPointFnSelector = FunctionSelector.fromNameAndParameters( parentEntryPointFn.name, parentEntryPointFn.parameters, ); const childContractAddress = AztecAddress.random(); - const childValueFn = ChildContractAbi.functions.find(f => f.name === 'pubGetValue')!; + const childValueFn = ChildContractArtifact.functions.find(f => f.name === 'pubGetValue')!; const childValueFnSelector = FunctionSelector.fromNameAndParameters(childValueFn.name, childValueFn.parameters); const initialValue = 3n; @@ -285,8 +285,8 @@ describe('ACIR public execution simulator', () => { }); it('Should be able to create a commitment from the public context', async () => { - const shieldAbi = NonNativeTokenContractAbi.functions.find(f => f.name === 'shield')!; - const args = encodeArguments(shieldAbi, params); + const shieldArtifact = NonNativeTokenContractArtifact.functions.find(f => f.name === 'shield')!; + const args = encodeArguments(shieldArtifact, params); const callContext = CallContext.from({ msgSender: AztecAddress.random(), @@ -298,7 +298,7 @@ describe('ACIR public execution simulator', () => { isStaticCall: false, }); - publicContracts.getBytecode.mockResolvedValue(Buffer.from(shieldAbi.bytecode, 'base64')); + publicContracts.getBytecode.mockResolvedValue(Buffer.from(shieldArtifact.bytecode, 'base64')); // mock initial balance to be greater than the amount being sent publicState.storageRead.mockResolvedValue(amount); @@ -318,8 +318,10 @@ describe('ACIR public execution simulator', () => { }); it('Should be able to create a L2 to L1 message from the public context', async () => { - const createL2ToL1MessagePublicAbi = TestContractAbi.functions.find(f => f.name === 'createL2ToL1MessagePublic')!; - const args = encodeArguments(createL2ToL1MessagePublicAbi, params); + const createL2ToL1MessagePublicArtifact = TestContractArtifact.functions.find( + f => f.name === 'createL2ToL1MessagePublic', + )!; + const args = encodeArguments(createL2ToL1MessagePublicArtifact, params); const 
callContext = CallContext.from({ msgSender: AztecAddress.random(), @@ -331,7 +333,7 @@ describe('ACIR public execution simulator', () => { isStaticCall: false, }); - publicContracts.getBytecode.mockResolvedValue(Buffer.from(createL2ToL1MessagePublicAbi.bytecode, 'base64')); + publicContracts.getBytecode.mockResolvedValue(Buffer.from(createL2ToL1MessagePublicArtifact.bytecode, 'base64')); const execution: PublicExecution = { contractAddress, functionData, args, callContext }; const result = await executor.simulate(execution, GlobalVariables.empty()); @@ -347,7 +349,7 @@ describe('ACIR public execution simulator', () => { }); it('Should be able to consume an Ll to L2 message in the public context', async () => { - const mintPublicAbi = NonNativeTokenContractAbi.functions.find(f => f.name === 'mintPublic')!; + const mintPublicArtifact = NonNativeTokenContractArtifact.functions.find(f => f.name === 'mintPublic')!; // Set up cross chain message const canceller = EthAddress.random(); @@ -366,7 +368,7 @@ describe('ACIR public execution simulator', () => { // Stub message key const messageKey = Fr.random(); - const args = encodeArguments(mintPublicAbi, [ + const args = encodeArguments(mintPublicArtifact, [ bridgedAmount, recipient.toField(), messageKey, @@ -384,7 +386,7 @@ describe('ACIR public execution simulator', () => { isStaticCall: false, }); - publicContracts.getBytecode.mockResolvedValue(Buffer.from(mintPublicAbi.bytecode, 'base64')); + publicContracts.getBytecode.mockResolvedValue(Buffer.from(mintPublicArtifact.bytecode, 'base64')); publicState.storageRead.mockResolvedValue(Fr.ZERO); // Mock response @@ -403,9 +405,11 @@ describe('ACIR public execution simulator', () => { }); it('Should be able to create a nullifier from the public context', async () => { - const createNullifierPublicAbi = TestContractAbi.functions.find(f => f.name === 'createNullifierPublic')!; + const createNullifierPublicArtifact = TestContractArtifact.functions.find( + f => f.name === 
'createNullifierPublic', + )!; - const args = encodeArguments(createNullifierPublicAbi, params); + const args = encodeArguments(createNullifierPublicArtifact, params); const callContext = CallContext.from({ msgSender: AztecAddress.random(), @@ -417,7 +421,7 @@ describe('ACIR public execution simulator', () => { isStaticCall: false, }); - publicContracts.getBytecode.mockResolvedValue(Buffer.from(createNullifierPublicAbi.bytecode, 'base64')); + publicContracts.getBytecode.mockResolvedValue(Buffer.from(createNullifierPublicArtifact.bytecode, 'base64')); const execution: PublicExecution = { contractAddress, functionData, args, callContext }; const result = await executor.simulate(execution, GlobalVariables.empty()); diff --git a/yarn-project/acir-simulator/src/test/utils.ts b/yarn-project/acir-simulator/src/test/utils.ts index 7b0baafe2ae..6ba0b9d013e 100644 --- a/yarn-project/acir-simulator/src/test/utils.ts +++ b/yarn-project/acir-simulator/src/test/utils.ts @@ -1,10 +1,10 @@ import { AztecAddress, CircuitsWasm, EthAddress, Fr } from '@aztec/circuits.js'; import { computeSecretMessageHash } from '@aztec/circuits.js/abis'; -import { ContractAbi, getFunctionDebugMetadata } from '@aztec/foundation/abi'; +import { ContractArtifact, getFunctionDebugMetadata } from '@aztec/foundation/abi'; import { sha256ToField } from '@aztec/foundation/crypto'; import { L1Actor, L1ToL2Message, L2Actor } from '@aztec/types'; -import { FunctionAbiWithDebugMetadata } from '../index.js'; +import { FunctionArtifactWithDebugMetadata } from '../index.js'; /** * Test utility function to craft an L1 to L2 message. 
@@ -42,14 +42,17 @@ export const buildL1ToL2Message = async ( ); }; -export const getFunctionAbi = (abi: ContractAbi, functionName: string): FunctionAbiWithDebugMetadata => { - const functionIndex = abi.functions.findIndex(f => f.name === functionName); +export const getFunctionArtifact = ( + artifact: ContractArtifact, + functionName: string, +): FunctionArtifactWithDebugMetadata => { + const functionIndex = artifact.functions.findIndex(f => f.name === functionName); if (functionIndex < 0) { throw new Error(`Unknown function ${functionName}`); } - const functionAbi = abi.functions[functionIndex]; + const functionArtifact = artifact.functions[functionIndex]; - const debug = getFunctionDebugMetadata(abi, functionName); + const debug = getFunctionDebugMetadata(artifact, functionName); - return { ...functionAbi, debug }; + return { ...functionArtifact, debug }; }; diff --git a/yarn-project/aztec-nr/.gitrepo b/yarn-project/aztec-nr/.gitrepo index 7dfb7e1084b..84ed9ddf9ae 100644 --- a/yarn-project/aztec-nr/.gitrepo +++ b/yarn-project/aztec-nr/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/aztec-nr branch = master - commit = 38dfa9f2d58631628558ab1495304dd26369ad47 + commit = a4f9ece9b7f0f4073c367339743d1e7c4109f8e4 method = merge cmdver = 0.4.6 - parent = 171a5508b262ed60397da90bf4092a6122b8bbed + parent = b8199802bad3c05ebe4d1ded5338a09a04e0ed7e diff --git a/yarn-project/aztec-nr/address-note/Nargo.toml b/yarn-project/aztec-nr/address-note/Nargo.toml new file mode 100644 index 00000000000..1d50983678b --- /dev/null +++ b/yarn-project/aztec-nr/address-note/Nargo.toml @@ -0,0 +1,8 @@ +[package] +name = "address_note" +authors = ["aztec-labs"] +compiler_version = "0.7.1" +type = "lib" + +[dependencies] +aztec = { path = "../aztec" } \ No newline at end of file diff --git a/yarn-project/noir-contracts/src/contracts/escrow_contract/src/address_note.nr b/yarn-project/aztec-nr/address-note/src/address_note.nr similarity index 100% rename from 
yarn-project/noir-contracts/src/contracts/escrow_contract/src/address_note.nr rename to yarn-project/aztec-nr/address-note/src/address_note.nr diff --git a/yarn-project/aztec-nr/address-note/src/lib.nr b/yarn-project/aztec-nr/address-note/src/lib.nr new file mode 100644 index 00000000000..f016584e48c --- /dev/null +++ b/yarn-project/aztec-nr/address-note/src/lib.nr @@ -0,0 +1 @@ +mod address_note; \ No newline at end of file diff --git a/yarn-project/aztec-nr/authwit/src/auth.nr b/yarn-project/aztec-nr/authwit/src/auth.nr index 751818587a3..2844308f52f 100644 --- a/yarn-project/aztec-nr/authwit/src/auth.nr +++ b/yarn-project/aztec-nr/authwit/src/auth.nr @@ -12,36 +12,57 @@ global IS_VALID_PUBLIC_SELECTOR = 0xf3661153; // @todo #2676 Should use different generator than the payload to limit probability of collisions. -// Assert that `whom` have authorized `message_hash` with a valid authentication witness -pub fn assert_valid_authwit(context: &mut PrivateContext, whom: AztecAddress, message_hash: Field) { - let result = context.call_private_function(whom.address, IS_VALID_SELECTOR, [message_hash])[0]; +// docs:start:assert_valid_authwit +// Assert that `on_behalf_of` have authorized `message_hash` with a valid authentication witness +pub fn assert_valid_authwit(context: &mut PrivateContext, on_behalf_of: AztecAddress, message_hash: Field) { + let result = context.call_private_function(on_behalf_of.address, IS_VALID_SELECTOR, [message_hash])[0]; context.push_new_nullifier(message_hash, EMPTY_NULLIFIED_COMMITMENT); assert(result == IS_VALID_SELECTOR, "Message not authorized by account"); } +// docs:end:assert_valid_authwit -// Assert that `whom` have authorized the current call with a valid authentication witness -pub fn assert_current_call_valid_authwit(context: &mut PrivateContext, whom: AztecAddress) { - let args = [context.msg_sender(), context.this_address(), context.selector(), context.args_hash]; - let message_hash = pedersen_with_separator(args, 
GENERATOR_INDEX__SIGNATURE_PAYLOAD)[0]; - assert_valid_authwit(context, whom, message_hash); +// docs:start:assert_current_call_valid_authwit +// Assert that `on_behalf_of` have authorized the current call with a valid authentication witness +pub fn assert_current_call_valid_authwit(context: &mut PrivateContext, on_behalf_of: AztecAddress) { + // message_hash = H(caller, contract_this, selector, args_hash) + let message_hash = pedersen_with_separator( + [context.msg_sender(), context.this_address(), context.selector(), context.args_hash], + GENERATOR_INDEX__SIGNATURE_PAYLOAD + )[0]; + assert_valid_authwit(context, on_behalf_of, message_hash); } +// docs:end:assert_current_call_valid_authwit -// Assert that `whom` have authorized `message_hash` in a public context -pub fn assert_valid_authwit_public(context: &mut PublicContext, whom: AztecAddress, message_hash: Field) { - let result = context.call_public_function(whom.address, IS_VALID_PUBLIC_SELECTOR, [message_hash])[0]; +// docs:start:assert_valid_authwit_public +// Assert that `on_behalf_of` have authorized `message_hash` in a public context +pub fn assert_valid_authwit_public(context: &mut PublicContext, on_behalf_of: AztecAddress, message_hash: Field) { + let result = context.call_public_function(on_behalf_of.address, IS_VALID_PUBLIC_SELECTOR, [message_hash])[0]; context.push_new_nullifier(message_hash, EMPTY_NULLIFIED_COMMITMENT); assert(result == IS_VALID_SELECTOR, "Message not authorized by account"); } +// docs:end:assert_valid_authwit_public -// Assert that `whom` have authorized the current call in a public context -pub fn assert_current_call_valid_authwit_public(context: &mut PublicContext, whom: AztecAddress) { - let args = [context.msg_sender(), context.this_address(), context.selector(), context.args_hash]; - let message_hash = pedersen_with_separator(args, GENERATOR_INDEX__SIGNATURE_PAYLOAD)[0]; - assert_valid_authwit_public(context, whom, message_hash); +// 
docs:start:assert_current_call_valid_authwit_public +// Assert that `on_behalf_of` have authorized the current call in a public context +pub fn assert_current_call_valid_authwit_public(context: &mut PublicContext, on_behalf_of: AztecAddress) { + // message_hash = H(caller, contract_this, selector, args_hash) + let message_hash = pedersen_with_separator( + [context.msg_sender(), context.this_address(), context.selector(), context.args_hash], + GENERATOR_INDEX__SIGNATURE_PAYLOAD + )[0]; + assert_valid_authwit_public(context, on_behalf_of, message_hash); } +// docs:end:assert_current_call_valid_authwit_public +// docs:start:compute_authwit_message_hash // Compute the message hash to be used by an authentication witness -pub fn compute_authwit_message_hash(caller: AztecAddress, target: AztecAddress, selector: Field, args: [Field; N]) -> Field { +pub fn compute_authwit_message_hash( + caller: AztecAddress, + target: AztecAddress, + selector: Field, + args: [Field; N] +) -> Field { let args_hash = hash_args(args); pedersen_with_separator([caller.address, target.address, selector, args_hash], GENERATOR_INDEX__SIGNATURE_PAYLOAD)[0] -} \ No newline at end of file +} +// docs:end:compute_authwit_message_hash \ No newline at end of file diff --git a/yarn-project/aztec-nr/aztec/src/abi.nr b/yarn-project/aztec-nr/aztec/src/abi.nr index 916c6eafac4..fc8a0c50cb1 100644 --- a/yarn-project/aztec-nr/aztec/src/abi.nr +++ b/yarn-project/aztec-nr/aztec/src/abi.nr @@ -313,9 +313,6 @@ struct PublicCircuitPublicInputs { unencrypted_log_preimages_length: Field, block_data: HistoricBlockData, prover_address: Field, - - // TODO: include globals in here and check them elsewhere - // https://github.com/AztecProtocol/aztec-packages/issues/1567 } impl PublicCircuitPublicInputs { @@ -336,11 +333,9 @@ impl PublicCircuitPublicInputs { inputs.push_array(self.new_nullifiers); inputs.push_array(self.new_l2_to_l1_msgs); - // We do not include block_data since it's not in the cpp hash - 
inputs.push_array(self.unencrypted_logs_hash); inputs.push(self.unencrypted_log_preimages_length); - inputs.push_array(self.block_data.serialize()); // see https://github.com/AztecProtocol/aztec-packages/issues/1473 + inputs.push_array(self.block_data.serialize()); inputs.push(self.prover_address); dep::std::hash::pedersen_with_separator(inputs.storage, GENERATOR_INDEX__PUBLIC_CIRCUIT_PUBLIC_INPUTS)[0] diff --git a/yarn-project/aztec-sandbox/src/examples/private_token_contract.ts b/yarn-project/aztec-sandbox/src/examples/private_token_contract.ts index 1617c170a08..8b71aa22f99 100644 --- a/yarn-project/aztec-sandbox/src/examples/private_token_contract.ts +++ b/yarn-project/aztec-sandbox/src/examples/private_token_contract.ts @@ -25,7 +25,7 @@ const SECONDARY_AMOUNT = 33n; /** * Deploys the Private Token contract. * @param owner - The address that the initial balance will belong to. - * @returns An Aztec Contract object with the private token's ABI. + * @returns An Aztec Contract object with the private token's artifact. 
*/ async function deployZKContract(owner: AztecAddress) { logger('Deploying L2 contract...'); diff --git a/yarn-project/aztec.js/README.md b/yarn-project/aztec.js/README.md index d031fb53bb0..e1041cab73f 100644 --- a/yarn-project/aztec.js/README.md +++ b/yarn-project/aztec.js/README.md @@ -21,7 +21,7 @@ console.log(`New account deployed at ${wallet.getAddress()}`); ```typescript import { Contract } from '@aztec/aztec.js'; -const contract = await Contract.deploy(wallet, MyContractAbi, [...constructorArgs]).send().deployed(); +const contract = await Contract.deploy(wallet, MyContractArtifact, [...constructorArgs]).send().deployed(); console.log(`Contract deployed at ${contract.address}`); ``` @@ -30,7 +30,7 @@ console.log(`Contract deployed at ${contract.address}`); ```typescript import { Contract } from '@aztec/aztec.js'; -const contract = await Contract.at(contractAddress, MyContractAbi, wallet); +const contract = await Contract.at(contractAddress, MyContractArtifact, wallet); const tx = await contract.methods.transfer(amount, recipientAddress).send().wait(); console.log(`Transferred ${amount} to ${recipientAddress} on block ${tx.blockNumber}`); ``` @@ -40,7 +40,7 @@ console.log(`Transferred ${amount} to ${recipientAddress} on block ${tx.blockNum ```typescript import { Contract } from '@aztec/aztec.js'; -const contract = await Contract.at(contractAddress, MyContractAbi, wallet); +const contract = await Contract.at(contractAddress, MyContractArtifact, wallet); const balance = await contract.methods.getBalance(wallet.getAddress()).view(); console.log(`Account balance is ${balance}`); ``` diff --git a/yarn-project/aztec.js/src/account/contract/base_account_contract.ts b/yarn-project/aztec.js/src/account/contract/base_account_contract.ts index 42c76f1380a..d749a767fe8 100644 --- a/yarn-project/aztec.js/src/account/contract/base_account_contract.ts +++ b/yarn-project/aztec.js/src/account/contract/base_account_contract.ts @@ -1,4 +1,4 @@ -import { ContractAbi } from 
'@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; import { CompleteAddress, NodeInfo } from '@aztec/types'; import { DefaultAccountInterface } from '../defaults/default_interface.js'; @@ -13,10 +13,10 @@ export abstract class BaseAccountContract implements AccountContract { abstract getAuthWitnessProvider(address: CompleteAddress): AuthWitnessProvider; abstract getDeploymentArgs(): Promise; - constructor(private abi: ContractAbi) {} + constructor(private artifact: ContractArtifact) {} - getContractAbi(): ContractAbi { - return this.abi; + getContractArtifact(): ContractArtifact { + return this.artifact; } getInterface(address: CompleteAddress, nodeInfo: NodeInfo): Promise { diff --git a/yarn-project/aztec.js/src/account/contract/ecdsa_account_contract.ts b/yarn-project/aztec.js/src/account/contract/ecdsa_account_contract.ts index 3d057db434f..d238b8c417f 100644 --- a/yarn-project/aztec.js/src/account/contract/ecdsa_account_contract.ts +++ b/yarn-project/aztec.js/src/account/contract/ecdsa_account_contract.ts @@ -1,9 +1,9 @@ import { Ecdsa } from '@aztec/circuits.js/barretenberg'; -import { ContractAbi } from '@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; import { Fr } from '@aztec/foundation/fields'; import { AuthWitness, CompleteAddress } from '@aztec/types'; -import EcdsaAccountContractAbi from '../../abis/ecdsa_account_contract.json' assert { type: 'json' }; +import EcdsaAccountContractArtifact from '../../artifacts/ecdsa_account_contract.json' assert { type: 'json' }; import { AuthWitnessProvider } from '../interface.js'; import { BaseAccountContract } from './base_account_contract.js'; @@ -13,7 +13,7 @@ import { BaseAccountContract } from './base_account_contract.js'; */ export class EcdsaAccountContract extends BaseAccountContract { constructor(private signingPrivateKey: Buffer) { - super(EcdsaAccountContractAbi as ContractAbi); + super(EcdsaAccountContractArtifact as ContractArtifact); } 
async getDeploymentArgs() { diff --git a/yarn-project/aztec.js/src/account/contract/index.ts b/yarn-project/aztec.js/src/account/contract/index.ts index 730acfda80b..611721b64c8 100644 --- a/yarn-project/aztec.js/src/account/contract/index.ts +++ b/yarn-project/aztec.js/src/account/contract/index.ts @@ -1,4 +1,4 @@ -import { ContractAbi } from '@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; import { CompleteAddress, NodeInfo } from '@aztec/types'; import { AccountInterface } from '../interface.js'; @@ -10,14 +10,14 @@ export * from './base_account_contract.js'; // docs:start:account-contract-interface /** - * An account contract instance. Knows its ABI, deployment arguments, how to create + * An account contract instance. Knows its artifact, deployment arguments, how to create * transaction execution requests out of function calls, and how to authorize actions. */ export interface AccountContract { /** - * Returns the ABI of this account contract. + * Returns the artifact of this account contract. */ - getContractAbi(): ContractAbi; + getContractArtifact(): ContractArtifact; /** * Returns the deployment arguments for this instance. 
diff --git a/yarn-project/aztec.js/src/account/contract/schnorr_account_contract.ts b/yarn-project/aztec.js/src/account/contract/schnorr_account_contract.ts index d185a5a70c4..2f963a468de 100644 --- a/yarn-project/aztec.js/src/account/contract/schnorr_account_contract.ts +++ b/yarn-project/aztec.js/src/account/contract/schnorr_account_contract.ts @@ -1,9 +1,9 @@ import { Schnorr } from '@aztec/circuits.js/barretenberg'; -import { ContractAbi } from '@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; import { Fr } from '@aztec/foundation/fields'; import { AuthWitness, CompleteAddress, GrumpkinPrivateKey } from '@aztec/types'; -import SchnorrAccountContractAbi from '../../abis/schnorr_account_contract.json' assert { type: 'json' }; +import SchnorrAccountContractArtifact from '../../artifacts/schnorr_account_contract.json' assert { type: 'json' }; import { AuthWitnessProvider } from '../interface.js'; import { BaseAccountContract } from './base_account_contract.js'; @@ -13,7 +13,7 @@ import { BaseAccountContract } from './base_account_contract.js'; */ export class SchnorrAccountContract extends BaseAccountContract { constructor(private signingPrivateKey: GrumpkinPrivateKey) { - super(SchnorrAccountContractAbi as ContractAbi); + super(SchnorrAccountContractArtifact as ContractArtifact); } async getDeploymentArgs() { diff --git a/yarn-project/aztec.js/src/account/contract/single_key_account_contract.ts b/yarn-project/aztec.js/src/account/contract/single_key_account_contract.ts index 5b8f8c9c199..63cfc44524e 100644 --- a/yarn-project/aztec.js/src/account/contract/single_key_account_contract.ts +++ b/yarn-project/aztec.js/src/account/contract/single_key_account_contract.ts @@ -1,10 +1,10 @@ import { PartialAddress } from '@aztec/circuits.js'; import { Schnorr } from '@aztec/circuits.js/barretenberg'; -import { ContractAbi } from '@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; import { Fr } from 
'@aztec/foundation/fields'; import { AuthWitness, CompleteAddress, GrumpkinPrivateKey } from '@aztec/types'; -import SchnorrSingleKeyAccountContractAbi from '../../abis/schnorr_single_key_account_contract.json' assert { type: 'json' }; +import SchnorrSingleKeyAccountContractArtifact from '../../artifacts/schnorr_single_key_account_contract.json' assert { type: 'json' }; import { generatePublicKey } from '../../index.js'; import { AuthWitnessProvider } from '../interface.js'; import { BaseAccountContract } from './base_account_contract.js'; @@ -15,7 +15,7 @@ import { BaseAccountContract } from './base_account_contract.js'; */ export class SingleKeyAccountContract extends BaseAccountContract { constructor(private encryptionPrivateKey: GrumpkinPrivateKey) { - super(SchnorrSingleKeyAccountContractAbi as ContractAbi); + super(SchnorrSingleKeyAccountContractArtifact as ContractArtifact); } getDeploymentArgs(): Promise { diff --git a/yarn-project/aztec.js/src/account/defaults/default_entrypoint.ts b/yarn-project/aztec.js/src/account/defaults/default_entrypoint.ts index d036f1fbade..974d0fa6bde 100644 --- a/yarn-project/aztec.js/src/account/defaults/default_entrypoint.ts +++ b/yarn-project/aztec.js/src/account/defaults/default_entrypoint.ts @@ -1,5 +1,5 @@ import { AztecAddress, Fr, FunctionData, TxContext } from '@aztec/circuits.js'; -import { FunctionAbiHeader, encodeArguments } from '@aztec/foundation/abi'; +import { FunctionAbi, encodeArguments } from '@aztec/foundation/abi'; import { FunctionCall, PackedArguments, TxExecutionRequest } from '@aztec/types'; import { DEFAULT_CHAIN_ID, DEFAULT_VERSION } from '../../utils/defaults.js'; @@ -97,6 +97,6 @@ export class DefaultAccountEntrypoint implements EntrypointInterface { }, ], returnTypes: [], - } as FunctionAbiHeader; + } as FunctionAbi; } } diff --git a/yarn-project/aztec.js/src/account/manager/index.ts b/yarn-project/aztec.js/src/account/manager/index.ts index 3e8248d935b..69f73ebea8d 100644 --- 
a/yarn-project/aztec.js/src/account/manager/index.ts +++ b/yarn-project/aztec.js/src/account/manager/index.ts @@ -64,7 +64,7 @@ export class AccountManager { if (!this.completeAddress) { const encryptionPublicKey = await generatePublicKey(this.encryptionPrivateKey); const contractDeploymentInfo = await getContractDeploymentInfo( - this.accountContract.getContractAbi(), + this.accountContract.getContractArtifact(), await this.accountContract.getDeploymentArgs(), this.salt!, encryptionPublicKey, @@ -107,7 +107,7 @@ export class AccountManager { if (!this.salt) throw new Error(`Cannot deploy account contract without known salt.`); await this.register(); const encryptionPublicKey = await this.getEncryptionPublicKey(); - const deployer = new ContractDeployer(this.accountContract.getContractAbi(), this.pxe, encryptionPublicKey); + const deployer = new ContractDeployer(this.accountContract.getContractArtifact(), this.pxe, encryptionPublicKey); const args = await this.accountContract.getDeploymentArgs(); this.deployMethod = deployer.deploy(...args); } diff --git a/yarn-project/aztec.js/src/abis/ecdsa_account_contract.json b/yarn-project/aztec.js/src/artifacts/ecdsa_account_contract.json similarity index 99% rename from yarn-project/aztec.js/src/abis/ecdsa_account_contract.json rename to yarn-project/aztec.js/src/artifacts/ecdsa_account_contract.json index f8617f768f2..4dbcf778894 100644 --- a/yarn-project/aztec.js/src/abis/ecdsa_account_contract.json +++ b/yarn-project/aztec.js/src/artifacts/ecdsa_account_contract.json @@ -694,5 +694,6 @@ "bytecode": 
"H4sIAAAAAAAA/+2dB5hdx3me9+yiDQaNJIheFiABsHN3APaCJXnBXkCQBECwAgRIAgQBEoW99wJ2NarTsuI4juIoVoktK7as2LJiy4oty7Ks2JLiOI7jOO690DPnzid++3N8lmvO/+zdZ2ee58edmbNnvvf/v7nl3Hux+0NdXV1VV7v1+JjZ9faG4wPxtu/dtf4q31p9mpzdo4SzZ5RwjhslnONHCeeEUcI5cZRwTholnGaUcE4eJZx2lHBOGSWcU0cJ57RRwjl9lHDOGCWcB40SzoNHCecho4RzZkbOucR5aLydFW9nx9s58RY/Oy/ezo+3C2KO4+J4oY9FPhb76I3HUJAlPpb6OMzH4T6W+VjuY4WPI3wc6eMoH0f7OMbHsT6O83F8XKPfh/Ox0scqHyf4ONHHST5O9nGKj1N9nObjdB9n+DjTx+pYs7N8nO3jHB8tH2t8nOvjPB/n+7jAx4U+LvJxsY9LfFzq4zIfa2MuvTGXy32s83GFjyt9XOVjvY8NPjb6uNrHJh/X+LjWx3U+rvdxg48bfWz2scXHTT62+tjm42Yft/i41cd2Hzt83OZjp4/bfezysdvHHaLmd/rY42Ovj33x2Ix4bL+Pu3zc7eMeH/f6uM/H/T4e8PGgj4d8POzjER+P+njMx+M+nhBrPenjKR9P+3jGx7M+nvPxvI8DPl7w8aKPl3y87OMVH6/6eM3He+Ja3XGt9/p4n5h7v48PxP7r8faD8fZD8fbD8fYj8faj8fZj8fbj8faNrrfa56a1b8NrTuzzQ7remsP7BQfTHI4fRHM4PoPmcHw6zeH4NJrD8ak0h+NTaA7HLc3h+GSa4+O4xfFJNIfjE2kOxyfQHI6PpzkcH0dzON5DczjeTXM4XtEcjncJ/dAG4m3fu2wTurI/5vaFnPspj65Evvz+k8x3XKIu4xP1Yz9wnH3DcfaXj+MWx3m/4DjvGxzn/YfjvE9xnPczjvO+x3G+f+A4349wnO9vOM73SxyfSXM4fijN4fgsmsPx2TSH43NoDsfn0hyO47ku5DWBxgPxtu/dNWdIF60S4wHqQz+wLFBgmT8MlgVUr4WxfyjxLVLgWxzXmks6vXl16vd1F4ucMYaWJYZFiiw2oa2gM6i2aE3e9xLLkrws9UvFpaQFriVUexyfSRxLM9e+Ik2sizHzvVPWhSPMamhu/gizWGJYTHP4uV49PmcEX2hNe30psRyelaW/L7AcNgyWw4llWVaW9uum5ZnXDGusIH7kCnZLx5dTbivyctR7clnX4JpizHyFtbAW1sJaWAtrYS2shXVssxqaWzLCLHx9dJgaS3+fTWhrXG/w+3FYO7wP+gZpLs6cG39+g/dRwACtHvqZT0x9i+uTcW4yHV+ixupuCmsuzLpm+9oX71ehNV1v8nsWed9v7O8L7ylPpFouED6As5sYMr//Wl+f8vuv0IRO8BrvU/D7w3jPGMfC/vi0Hue/+n1ik+DP72X78Wu+YOHPh0Pj90y7FVlsQrtT3y/n9+Eyv19ee7JQsGAMLa5VjyKLTWhrfUbAOYfW5An0+f1vfszrzVyHd/rZAhjGKbJ0+mcL/D7tYuIbqferwTBekcUmtDvpPXB+v5pf++R9X7ztyWGCBWNoWcGgxWIT2go67l/7WQBfo/B1Qeb38huvUaDFtZqoyGIT2go6zoicQ2vyBPrhPFw7LSO+IzLXoeoafC09QGNoca0mKbLYhLaCjjMi59CaPIF+OO/I2Ofr2qMy16EiHayLMbS4VkaRxSa0FXScETmH1uTJUZT70bF/JPEdk7kOFelgXYyhxbWarMhiE9oKOs6InENr8gT64bxjY/9o4jsucx0q0sG6GEOLa2UVWWxCW0HHGZFzaE2eQD+cd3zsH0t8fZnrUJEO1sW4j3wAwxRFFpvQVtBxRuQcWpMn0A/n4TuhxxOfy1yHinSwLsbQ4lpNVWSxCW0FHWdEzqE1eQL9cN7K2O8nvlWZ61CRDtbFGFpcq2mKLDa
hraDjjMg5tCZPoB/OOyH2VxLfiZnrUJEO1sUYWlyr6YosNqGtoOOMyDm0Jk+gH847KfZPIL6TM9ehIh2sizG0uFYzFFlsQltBxxmRc2hNnkA/nHdK7J9EfKdmrkNFOlgXY2hxrQ5SZLEJbQUdZ0TOoTV5Av1w3mmxfwrxnZ65DhXpYF2MocW1OliRxSa0FXScETmH1uQJ9MN5Z8T+acR3ZuY6VKSDdTGGFtfqEEUWm9BW0HFG5BxakyfQD+etjv0ziG8gcx0q0sG6q4UG12q2IotNaCvoOK4tWpMnzHJ2XpZVgeWsYbCcTSzn5GWpv6/eyrxmWGMN8SNXsFs63qLc1uTlqPf5OV2Da4ox8xXWsc1qaG71CLPw4+FZeiyrbEJbQccZkXNoTY917Mm5sX8O8Z2Xl6/25FzBgjG0uFYtRRab0FbQcUbkHFqTJ8xyQVaWlfX3F88fBssFxHJhVpb289ZFpAUu6Fg6zvvgorwc9Z68UOSPMfMV1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1sJaWAtrYS2shTU/q6G5c0eYxRLD+WosK+vfcyW1FXJ2RuQcWtP3RNiTi2P/QuK7JC9f7cnFggVjaHGtWoosNqGtoOOMyDm0Jk+Y5bKsLK7+HtGlw2C5jFjWZmVpf4/octICF3QsHed9cHlejnpPrhX5Y8x8hbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1sI6WlgNzV08wiz8Xvylaiyu/hxGaivk7IzIObSm99nZk3Wxv5b4rsjLV3uyTrBgDC2uVUuRxSa0FXScETmH1uQJs1yVl6X+3SZXDoPlKmJZn5elL6yxgbTABR1Lx3kfbMjLUe/J9SJ/jJmvsI5tVkNz60aYhR+7rtRjqX8PidRW0HFG5Bxa0+MSe7Ix9tcT39V5+WpPNgoWjKHFtWopstiEtoKOMyLn0Jo8YZZrsrK0/4b0pmGwXEMs12ZlaT9vXUda4IKOpeO8D67Ly1HvyWtF/hgzX2EtrIW1sBbWwlpYC2thHdushuY2jjALX8tsUmNp/71nqa2QszMi59CarlPYk+tj/1riuyEvX+3J9YIFY2hxrVqKLDahraDjjMg5tCZPmGVzVpb2deyNw2DZTCxbsrK0r2NvIq0b4y10LB3nfXBTXo56T24R+WPMfIW1sBbWwlpYC2thLayFdWyzGpq7foRZ+FrmRjWW9nWs1FbI2RmRc2hN1ynsydbY30J82/Ly1Z5sFSwYQ4tr1VJksQltBR1nRM6hNXnCLLcosNw8DJZbiOXWvCz1dex20gIXdCwd532wPS9HvSdvFfljzHyjhdXQ3NYRZuH72M16LM4mtDV0jMg5tKb7D3uyI/ZvJb7b8vLVnuwQLBhDi2vVUmSxCW0FHWdEzqE1ecIstyuw7BwGy+3EsisvS/34upu0wAUdS8d5H+zOy1HvyV0if4yZb7SwGprbMcIsfB/bqcdSP75KbQ0dI3IOren+w57cEfu7iO/OvHy1J3cIFoyhxbVqKbLYhLaCjjMi59CaPIF+OG9P7N9BfHsz16EiHayLMbS4Vi1FFpvQVtBxRuQcWpMn0A/n7Yv9PcS3P3MdKtLBuhhDi2tlFFlsQltBxxmRc2hNnuyn3O+K/X3Ed3fmOlSkg3UxhhbXarIii01oK+g4I3IOrckT6Ifz7on9u4jv3sx1qEgH62IMLa6VVWSxCW0FHWdEzqE1eQL9cN59sX8P8d2fuQ4V6WBdjO8nH8AwRZHFJrQVdJwROYfW5An0w3kPxP59xPdg5jpUpIN1MYYW12qqIotNaCvoOCNyDq3JE+iH8x6K/QeI7+HMdahIB+tiDC2u1TRFFpvQVtBxRuQcWpMn0A/nPRL7DxHfo5nrUJEO1sUYWlyr6YosNqGtoOO4tmhNnkA/nPdY7D9CfI9nrkNFOlgXY2hxrVqKLDahraDjjMg5tCZPoB/OeyL2HyO+JzPXoSIdrIs
xtLhWLUUWm9BW0HFG5BxakyfQD+c9FftPEN/TmetQkQ7WxRhaXKuWIotNaCvoOCNyDq3JE+iH856J/aeI79nMdahIB+tiDC2uVUuRxSa0FXScETmH1uQJ9MN5z8X+M8T3fOY6VKSDdTGGFteqpchiE9oKOs6InENr8oRZDuRlqb8L/kJc6znSeTFzbSvSwboYQ4vrf0CRxSa0FXScETmH1uQz9MN5L8X+C8T3cuY6VKSDdTGGFtfqgCKLTWgr6Dgjcg6tyRPoh/Neif2XiO/VzHWoSAfrYgwtrtUBRRab0FbQcUbkHFqTJ9AP570W+68Q33sy16EiHayLMbS4Vi1FFpvQVtBxRuQcWpMn0A/nvTf2XyO+92WuQ0U6WBdjaHGtWoosNqGtoOOMyDm0Jk+gH857f+y/l/g+kLkOFelgXYyhxbVqKbLYhLaCjjMi59CaPIF+OO/12H8/8X0wcx0q0sG6GEOLa9VSZLEJbQUdZ0TOoTV5Av1w3odi/3Xi+3DmOlSkg3UxhhbXqqXIYhPaCjrOiJxDa/IE+uG8j8T+h4jvo5nrUJEO1sUYWlyrliKLTWgr6Dgjcg6tyRPoh/M+FvsfIb6PZ65DRTpYF2Noca1aiiw2oa2g47i2aE2efDzehvPeiP2PEd8PZa5DRTpYF2Noca1aiiw2oa2g44zIObQmT6AfzvtE7L9BfD+cuQ4V6WBdjKHFtWopstiEtoKOMyLn0Jo8gX4475Ox/wni+zeZ61CRDtbFGFpcq5Yii01oK+g4I3IOrckT6IfzfiT2P0l8/zZzHSrSwboYQ4tr1VJksQltBR1nRM6hNXkC/XDej8b+jxDfv8tch4p0sC7G0OJatRRZbEJbQccZkXNoTZ5AP5z3Y7H/o8T37zPXoSIdrIsxtLhWLUUWm9BW0HFG5BxakyfQD+d9KvZ/jPj+Q+Y6VKSDdTGGFteqpchiE9oKOs6InENr8gT64bwfj/1PEd9/zFyHinSwLsbQ4lq1FFlsQltBxxmRc2hNnkA/nPfp2P9x4vtPmetQkQ7WxRhaXKuWIotNaCvoOCNyDq3JE2b5TF6W+m87/MQwWD5DLJ/Ny1L/n8HPkRa4oGPpOO+Dz+XlqPfkZ0X+GDNfYR3brIbmPj3CLPzY9RN6LPXfdpDaCjrOiJxDa3pcYk8+H/ufJb7/nJev9uTzggVjaHGtWoosNqGtoOOMyDm0Jk+Y5aeysrj6e1A/OQyWnyKWL2RlaT9v/TRpgQs6lo7zPvjpvBz1nvyCyB9j5iushbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1tHCamju8yPMwu/F/6Qai6t/p7vUVsjZGZFzaE3vs7MnX4z9LxDff8nLV3vyRcGCMbS4Vi1FFpvQVtBxRuQcWpMnzPKzWVnaf5vsZ4bB8rPE8qWsLO3PYX6OtMAFHUvHeR/8XF6Oek9+SeSPMfMV1sJaWAtrYS2shbWwFtaxzWpo7osjzMLXMj+jxtL+22RSWyFnZ0TOoVViPEB99uTLsf8l4vuveflqT74sWDCGFteqpchiE9oKOs6InENr8oRZfiErS/s69ueHwfILxPKVrCzt69hfJC1wQcfScd4Hv5iXo96TXxH5Y8x8hbWwFtbCWlgLa2EtrIV1bLMamvvyCLPwtczPq7G0r2OltkLOzoicQ2u6TmFPvhr7XyG+/5aXr/bkq4IFY2hxrVqKLDahraDjjMg5tCZPmOWXs7K0r2N/aRgsv0wsX8vK0r6O/RXSAhd0LB3nffAreTnqPfk1kT/GzFdYC2thLayFtbAW1sJaWMc2q6G5r44wC1/L/JIaS/s6Vmor5OyMyDm0pusU9uTrsf814vvveflqT74uWDCGFteqpchiE9oKOs6InENr8oRZfi0vS/17yX51GCy/RizfyMtSX8f+OmmBCzqWjvM++PW8HPWe/IbIH2PmK6xjm9XQ3NdHmIUfu35Vj6X+vWRSW0HHGZFzaE2PS+zJN2P/G8T
3G3n5ak++KVgwhhbXqqXIYhPaCjrOiJxDa/KEWX4zL0v9vPWtYbD8JrF8Oy9L/bz1W6QFLuhYOs774LfyctR78tsif4yZr7CObVZDc98cYRZ+7PqWHkv9vCW1FXScETmH1vS4xJ58J/a/TXz/Iy9f7cl3BAvG0OJatRRZbEJbQccZkXNoTZ4wy+8osPz2MFh+h1i+m5elft76HmmBCzqWjvM++F5ejnpPflfkj/H3aH60sBqa+84Is/B97Lf1WJxNaGvocG3Rmu4/34u34bzvx/53ie9/5uWrPfm+YMEYWlyrliKLTWgr6Dgjcg6tyRPoh/N+N/a/T3z/K3MdKtLBuhhDi2vVUmSxCW0FHWdEzqE1eQL9cN7vxf7vEt//zlyHinSwLsbQ4lq1FFlsQltBxxmRc2hNnkA/nPf7sf97xPd/MtehIh2sizG0uFYtRRab0FbQcUbkHFqTJ9AP5/1B7P8+8f3fzHWoSAfrYgwtrlVLkcUmtBV0nBE5h9bkCfTDeX8Y+39AfP8vcx0q0sG6GEOLa9VSZLEJbQUdZ0TOoTV5Av1w3h/F/h8S3//PXIeKdLAuxtDiWrUUWWxCW0HHGZFzaE2eQD+c98ex/0fE9yeZ61CRDtbFGFpcq5Yii01oK+g4I3IOrckT6Ifz/jT2/5j4/ixzHSrSwboYQ4tr1VJksQltBR1nRM6hNXkC/XDen8f+nxLfX2SuQ0U6WBdjaHGtWoosNqGtoOOMyDm0Jk+gH877y9j/c+L7q8x1qEgH62IMLa5VS5HFJrQVdJwROYfW5An0w3l/Hft/SXx/k7kOFelgXYyhxbVqKbLYhLaCjjMi59CaPIF+OO9vY/+vie/vMtehIh2sizG0uFYHFFlsQltBxxmRc2hNnkA/nPf3sf+3xPcPmetQkQ7WxRhaXKsDiiw2oa2g44zIObQmT6AfzvvH2P974vunzHWoSAfrYgwtrtUBRRab0FbQcUbkHFqTJ9AP570Z+/9IfDg5E1/tyZuC5U0ByrVqKbLYhLaCjjMiZ5IYVAd5MJxXxf6bVIduBU+qajALxt3kyZsJT7oVPJHaCjrOiJyp7IPqgNZNnvTEfkV84xQ86RGeYDyOPAEDezJOwROpraDjjMh5KE/GkSfjY7+H+CYoeDJeeILxBPKkJ+HJBAVPpLaCjjMi56E8mUCeTIz98cQ3ScGTicITjCeRJ+MTnkxS8ERqK+g4I3IeypNJ5ImJ/YnEN1nBEyM8wXgyeTIx4clkBU+ktoKOMyLnoTyZTJ5Y9IlvioInVniC8RTyxCQ8maLgidRW0HFG5DyUJ1PIk6moCfFNU/BkqvAE42nkiU14Mk3BE6mtoOOMyHkoT6aRJ9NjfyrxzVDwZLrwBOMZ5MnUhCczFDyR2go6zoich/JkBnlyUOxPJ76DFTw5SHiC8cHkyfSEJwcreCK1FXScETkP5cnB5MkhsX8Q8c1U8OQQ4QnGM8mTgxKezFTwRGor6Dgjch7Kk5nkyaGxfwjxzVLw5FDhCcazyJNDEp7MUvBEaivoOCNyHsqTWeTJ7Ng/lPjmKHgyW3iC8Rzy5NCEJ3MUPJHaCjrOiJyH8mQOeTI39mcT3zwFT+YKTzCeR57MTngyT8ETqa2g44zIeShP5pEn82N/LvEtUPBkvvAE4wXkydyEJwsUPJHaCjrOiJyH8mQBebIw9ucT3yIFTxYKTzBeRJ7MT3iySMETqa2g44zIeShPmKU3L0v9/20XD4Oll1iW5GWp/9/SUhIH1xLaB0sT+2Cpwp5cIvYkxsxXWMc2q6G5hdXIsvBj12I9lvr/20ptBR1nRM6hNT0usSeHiT0T+A5X8OQw4QnGh5MnSxJ7NTeLTWgr6Dgjch7KE2ZZnpXF1b+nd9kwWJYTy4q8damft44gcXCtoH1wRGIfHKGwJ1eIPYkx8xXWwlpYC2thLayFtbAW1sJaWAtrYS2shbWwFtbCWlgL62hhNTSH96dHioXfi1+mxuLqvzMhtRVydkb
kHJoYDnqfnT05UuyZwHeUgidHCk8wPoo8WZHYq7lZbEJbQccZkfNQnjDLMVlZ2n8v8ehhsBxDLMfmrUv9OcxxJA6uY2kfHJfYB8cp7MljxZ7EmPkKa2EtrIW1sBbWwlpYC+vYZjU0h9f3I8XC1zJHq7G0/16i1FbI2RmRc2hiOOg6hT05XuyZwNen4MnxwhOM+8iTYxN7NTeLTWgr6Dgjch7KE2ZxmfdhYOkfBosjlpV561Jfx64icXCtpH2wKrEPVinsyZViT2LMfIW1sBbWwlpYC2thLayFdWyzGprD6/uRYuFrmX41lvZ1rNRWyNkZkXNoYjjoOoU9OUHsmcB3ooInJwhPMD6RPFmZ2Ku5WWxCW0HHGZHzUJ4wy8mZ92FgOWkYLCcTyyl561Jfx55K4uA6hfbBqYl9cKrCnjxF7EmMma+wFtbCWlgLa2EtrIW1sI5tVkNzeH0/Uix8LXOSGkv7OlZqK+TsjMg5NDEcdJ3Cnpwm9kzgO13Bk9OEJxifTp6cktiruVlsQltBxxmR81CeMMuZeVnq30t2xjBYziSW1XlZ6uvYARIH12raBwOJfTCgsCdXiz35g1x5vrCOaVZDc6dVI8vCj11n6LHUv5dMaivoOCNyDq3pcYk9OUvsmcB3toInZwlPMD6bPFmd2Ku5WWxCW0HHGZHzUJ4wS0vheeucYbC0iGVNXpb6eetcEgfXGtoH5yb2wbkKe3KN2JMYM19hHdushubOqkaWhR+7ztFjqZ+3pLaCjjMi59CaHpfYk/PEngl85yt4cp7wBOPzyZM1ib2am8UmtBV0nBE5D+UJs1yowHLBMFguJJaL8rLUz1sXkzi4LqJ9cHFiH1yssCcvEnsSY+YbLayG5s6rRpaF72MX6LE4m9DW0DEi59Ca7j/sySVizwS+SxU8uUR4gvGl5MlFib2am8UmtBV0nBE5D+XJpeTJZbF/CfGtVfDkMuEJxmvJk0sSnqxV8ERqK+g4I3IeypO15MnlsX8Z8a1T8ORy4QnG68iTyxKerFPwRGor6Dgjch7Kk3XkyRWxfznxXangyRXCE4yvJE8uT3hypYInUltBxxmR81CeXEmeXBX7VxDfegVPrhKeYLyePLki4cl6BU+ktoKOMyLnoTxZT55siP2riG+jgicbhCcYbyRPrkp4slHBE6mtoOOMyHkoTzaSJ1fH/gbi26TgydXCE4w3kScbEp5sUvBEaivoOCNyHsqTTeTJNbF/NfFdq+DJNcITjK8lT65OeHKtgidSW0HHGZHzUJ4wy3V5WfrCmtdnzi+scQMlhFyvIz9x/HrK7QaFvXWd2FsYM987ZZ3ZNbKsWv7fqLC/bxjG/r6R8tucmWWKX2Mi+bpZ+AvObuK5KbE38Lgz2ceW6u0/h/44Os6PVdsU9svWuGYVY5vgCro3K+hCZ3zUBQe0euhnvjW5fTu1q/2cgvlDqTa3ZmYMOrcMY//dSvXanvn+FdbYQeLg2k6PLzi+jTh2KPi2XTy+YLyDWNBwf1CoSX+TF9sTLNv0/Bk2y5YOYpne1TksUzqIxXQQy4QOYunpIJapHcQyuYNYJnYQy7gOYpnTQSyzO4hlVgexTOsgFttBLJM6iGV8B7FUI8xiut5+XWDo+Bb6ObxGvoXmuhPr4Tl2B10jfXra29fRzp11BmgMrcnEsEP5Ne07YRnfQSyTOojFdhDLtA5imdVBLLM7iGVOB7GM6yCWiR3EMrmDWKZ2EEtPB7FM6CAW00EsUzqIZXoHseC1YiewbOsglu6ER7flZVnFr9nRxHDQ6//biGVnXpb687TbM68Z1thFCSFXsFs6fjvltkvB853V4JpizHyFdWyz5tddWf8utZ3DuI8zy26F++MdJA6u3eTFHQkv7lDwYrfwAmPmK6yFtbAW1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1sJaWAtrftb8uq7+fgTrhiaGP9CSLHfmrUH9/Yg9JA6uO8mLPQk
v9ih4cafwAmPmK6yFtbAW1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sBbW0cKqoFv/H1vWDU0MB72/zCx787LU7y/vI3Fw7SUv9iW82KfgxV7hBcbMV1jHNmt+3f768569w7g/Mst+hfvjXSQOrv3kxV0JL+5S8GK/8AJj5iushbWwFtbCWlgLa2EtrGObNb9u+/U564YmhoNenzPL3XlrUL8+v4fEwXU3eXFPwot7FLy4W3iBMfMV1sJaWAtrYS2shbWwFtaxzaqgW/+NQtYNTQwHvT5nlnvzstSvz+8jcXDdS17cl/DiPgUv7hVeYMx8o4VVQbfeN/cOY98wy/0K++YBEgfX/eTFAwkvHlDw4n7hBcbMN1pYDc11d701h+M9NPdgnBtHcw/FufE09zDlhLlH4txEmns0zk2iucfi3GyaezzO8d+eeSL2d9Lck7G/m+aeiv07ae7p2N9Lc8/E/n6ae1Y8T4S558RjQJh7XngZ5g5QH7cvxLnJNPci7QnMvRTnptDcy3FuKs29Euem0dyrcW46zb2W4IPX99McvOa9Aa8fpDl4/RDNweuHaQ5eP0Jz8PpRmkONHqM51OhxmkONnqA51OhJmkONnqI51OhpmkONnqG5GXHuWZo7KM49R3MHx7nnae6QOMfe4+8ov0Bz+HuwL9Ic/sbLSzSH+8DLNIe/efIKzc2Nc6/S3Lw49xrd74JHi+P8QLzte3etfo7o7Rrcmp4joB9YFuVlqd+HXBDX6iWd+Xl16sfVBSI/jKFliWGRIotNaOfXcX2cc3fMbZbQ5X23gFiW5s3ZBZa5tH4v6UKrh37mM/EBITyWbZr21nmZ9199X8D+Rmu6L+jtCzfovvBOWHjvzMvK0v47EXMzrxnWWEr8yBXslo7zXsm8F+vHgnldg2sq92JhLayFtbAW1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sI4mVn6vf/EIs1hi0PvcwfXZhLbG++X8OR/WDp9drKHPLhZkzi3UmT+P6CUGaPXQz9w97S2uC2J/Mh3nPZGZ9SaFmtef3eAzELSmz0vmUX55P9twfeEz7YlUy7nCB3B2E8NhWRnan68cTvlCEzrBa9wH8HOG+r3EtiwvW/2YwGwDNIYWf/66VJHFJrT5c8ZQJ9wXllDtFiXqlPczwP539Rlg7s/rKuHDAGmwbuZ93M+6VQxoYL6H+rfhiyL0c6HBQzAHD+clfo77C8Q5lo7PU855LnEM0BhaYY/eSLnOS3DPIm4c5+eIzI/BNTc/BvcSA7T4+y5LiEXjdce/VEN+3bFE1Cw/S3/9ukNq83eGULPg6RvEofkaoVv4s4C4wIOfla/heuln5tB5yFHxvuHk41Bv19vvG/x48Ai9zvnUEK9zMn+3x/F3biQrP2ZqvuYO3/VjDn7MxM88HesyNfIcnrkOpmvw42pXV/Pz2OFUm8zP9/XrouWkBS5+3YHjM4ljeV6O2qNlIn+Mme+dsi7sANZlCdbU8+0yNdb296aYI7SmvbacWI7IytJ+zXQkrT9AGqx7VF7dftbFayZoYL6H+j9MryOOeqv7g8dIMAcPVyR+jvvLxDmWjq9QzvkI4higMbTCc8DrlOuKBPcc4sZxfu0C3/h5Y4VCLstFLssFM79nskyNpf2eidTm59L8r1Pa+S/qeqv1dr39vQx+j2URcfF7LOMzc/H36dGaHlugH74zj++57923e8/mW7Zt2LN937aK1hgn1uumdbrpWI/4uYldb2fIlvBMEuuO4uNiYhOieEgOX/i3MdHwQiL4EL6gH76QH76AH75wH75gP5M4X4i34Qv14YVo+MJ8MDU8aYQ7XXiRFAwPT27B5LDhe7vaL9rDC5nwJBNeLIQ7QNikYYOGO314YAt3+qN9HOPjWB/H+Tg+1MRHvw/nY6WPVT5O8HGij5N8nOzjFB+n+jjNx+k+zvBxpo/VsbZn+Tjbxzk+Wj7W+DjXx3k+zvdxgY8LfVzk42Ifl/i41MdlPtb
6uNzHOh9X+LjSx1U+1vvY4GOjj6t9bPJxjY9rfVzn43ofN/i40cdmH1t83ORjq49tPm72cYuPW31s97HDx20+dvq43ccuH7t93OHjTh97fOz1sc/Hfh93+bjbxz0+7vVxn4/7fTzg40EfD/l42McjPh718ZiPx3084eNJH0/5eNrHMz6e9fGcj+d9HOhq+/yij5d8vOzjFR+v+njNx3t8vNfH+3y838cHfLzu44M+PuTjwz4+4uOjPj7m4+Nd7YskvlOg/Ub8Xyenx/EV7Ttb796du/f19vXu8v9u3rlz993bth7Xy8f29t6+f+++3r37Nu/Z13vznt239/Yfx+s+FR9U8B9ELti1dds9vbv37+vdfXPvlt37d23d+8/h50dT8JUCAA==", "verificationKey": "0000000200000800000000740000000f00000003515f3109623eb3c25aa5b16a1a79fd558bac7a7ce62c4560a8c537c77ce80dd339128d1d37b6582ee9e6df9567efb64313471dfa18f520f9ce53161b50dbf7731bc5f900000003515f322bc4cce83a486a92c92fd59bd84e0f92595baa639fc2ed86b00ffa0dfded2a092a669a3bdb7a273a015eda494457cc7ed5236f26cee330c290d45a33b9daa94800000003515f332729426c008c085a81bd34d8ef12dd31e80130339ef99d50013a89e4558eee6d0fa4ffe2ee7b7b62eb92608b2251ac31396a718f9b34978888789042b790a30100000003515f342be6b6824a913eb7a57b03cb1ee7bfb4de02f2f65fe8a4e97baa7766ddb353a82a8a25c49dc63778cd9fe96173f12a2bc77f3682f4c4448f98f1df82c75234a100000003515f351f85760d6ab567465aadc2f180af9eae3800e6958fec96aef53fd8a7b195d7c000c6267a0dd5cfc22b3fe804f53e266069c0e36f51885baec1e7e67650c62e170000000c515f41524954484d455449430d9d0f8ece2aa12012fa21e6e5c859e97bd5704e5c122064a66051294bc5e04213f61f54a0ebdf6fee4d4a6ecf693478191de0c2899bcd8e86a636c8d3eff43400000003515f43224a99d02c86336737c8dd5b746c40d2be6aead8393889a76a18d664029096e90f7fe81adcc92a74350eada9622ac453f49ebac24a066a1f83b394df54dfa0130000000c515f46495845445f42415345060e8a013ed289c2f9fd7473b04f6594b138ddb4b4cf6b901622a14088f04b8d2c83ff74fce56e3d5573b99c7b26d85d5046ce0c6559506acb7a675e7713eb3a00000007515f4c4f4749430721a91cb8da4b917e054f72147e1760cfe0ef3d45090ac0f4961d84ec1996961a25e787b26bd8b50b1a99450f77a424a83513c2b33af268cd253b0587ff50c700000003515f4d05dbd8623b8652511e1eb38d38887a69eceb082f807514f09e127237c5213b401b9325b48c6c225968002318095f89d0ef9cf629b2b7f0172e03bc39aacf6ed800000007515f52414e474504b57a3805e41df328f5ca9aefa40fad5917391543b7b65c6476e60b8f72e9ad07
c92f3b3e11c8feae96dedc4b14a6226ef3201244f37cfc1ee5b96781f48d2b000000075349474d415f3125001d1954a18571eaa007144c5a567bb0d2be4def08a8be918b8c05e3b27d312c59ed41e09e144eab5de77ca89a2fd783be702a47c951d3112e3de02ce6e47c000000075349474d415f3223994e6a23618e60fa01c449a7ab88378709197e186d48d604bfb6931ffb15ad11c5ec7a0700570f80088fd5198ab5d5c227f2ad2a455a6edeec024156bb7beb000000075349474d415f3300cda5845f23468a13275d18bddae27c6bb189cf9aa95b6a03a0cb6688c7e8d829639b45cf8607c525cc400b55ebf90205f2f378626dc3406cc59b2d1b474fba000000075349474d415f342d299e7928496ea2d37f10b43afd6a80c90a33b483090d18069ffa275eedb2fc2f82121e8de43dc036d99b478b6227ceef34248939987a19011f065d8b5cef5c0000000010000000000000000100000002000000030000000400000005000000060000000700000008000000090000000a0000000b0000000c0000000d0000000e0000000f" } - ] + ], + "events": [] } diff --git a/yarn-project/aztec.js/src/abis/schnorr_account_contract.json b/yarn-project/aztec.js/src/artifacts/schnorr_account_contract.json similarity index 99% rename from yarn-project/aztec.js/src/abis/schnorr_account_contract.json rename to yarn-project/aztec.js/src/artifacts/schnorr_account_contract.json index a050dcc129c..b392f06a344 100644 --- a/yarn-project/aztec.js/src/abis/schnorr_account_contract.json +++ b/yarn-project/aztec.js/src/artifacts/schnorr_account_contract.json @@ -682,5 +682,6 @@ "bytecode": 
"H4sIAAAAAAAA/+2dB5hdx3me9+yiDQaNJIheFiABsHN3APaCJXnBXkCQBECwAgRIAgQBEoW99wJ2NarTsuI4juIoVoktK7as2LJiy4oty7Ks2JLiOI7jOO690DPnzid++3N8lmvO/+zdZ2ee58edmbNnvvf/v7nl3Hux+0NdXV1VV7v1+JjZ9faG4wPxtu/dtf4q31p9mpzdo4SzZ5RwjhslnONHCeeEUcI5cZRwTholnGaUcE4eJZx2lHBOGSWcU0cJ57RRwjl9lHDOGCWcB40SzoNHCecho4RzZkbOucR5aLydFW9nx9s58RY/Oy/ezo+3C2KO4+J4oY9FPhb76I3HUJAlPpb6OMzH4T6W+VjuY4WPI3wc6eMoH0f7OMbHsT6O83F8XKPfh/Ox0scqHyf4ONHHST5O9nGKj1N9nObjdB9n+DjTx+pYs7N8nO3jHB8tH2t8nOvjPB/n+7jAx4U+LvJxsY9LfFzq4zIfa2MuvTGXy32s83GFjyt9XOVjvY8NPjb6uNrHJh/X+LjWx3U+rvdxg48bfWz2scXHTT62+tjm42Yft/i41cd2Hzt83OZjp4/bfezysdvHHaLmd/rY42Ovj33x2Ix4bL+Pu3zc7eMeH/f6uM/H/T4e8PGgj4d8POzjER+P+njMx+M+nhBrPenjKR9P+3jGx7M+nvPxvI8DPl7w8aKPl3y87OMVH6/6eM3He+Ja3XGt9/p4n5h7v48PxP7r8faD8fZD8fbD8fYj8faj8fZj8fbj8faNrrfa56a1b8NrTuzzQ7remsP7BQfTHI4fRHM4PoPmcHw6zeH4NJrD8ak0h+NTaA7HLc3h+GSa4+O4xfFJNIfjE2kOxyfQHI6PpzkcH0dzON5DczjeTXM4XtEcjncJ/dAG4m3fu2wTurI/5vaFnPspj65Evvz+k8x3XKIu4xP1Yz9wnH3DcfaXj+MWx3m/4DjvGxzn/YfjvE9xnPczjvO+x3G+f+A4349wnO9vOM73SxyfSXM4fijN4fgsmsPx2TSH43NoDsfn0hyO47ku5DWBxgPxtu/dNWdIF60S4wHqQz+wLFBgmT8MlgVUr4WxfyjxLVLgWxzXmks6vXl16vd1F4ucMYaWJYZFiiw2oa2gM6i2aE3e9xLLkrws9UvFpaQFriVUexyfSRxLM9e+Ik2sizHzvVPWhSPMamhu/gizWGJYTHP4uV49PmcEX2hNe30psRyelaW/L7AcNgyWw4llWVaW9uum5ZnXDGusIH7kCnZLx5dTbivyctR7clnX4JpizHyFtbAW1sJaWAtrYS2shXVssxqaWzLCLHx9dJgaS3+fTWhrXG/w+3FYO7wP+gZpLs6cG39+g/dRwACtHvqZT0x9i+uTcW4yHV+ixupuCmsuzLpm+9oX71ehNV1v8nsWed9v7O8L7ylPpFouED6As5sYMr//Wl+f8vuv0IRO8BrvU/D7w3jPGMfC/vi0Hue/+n1ik+DP72X78Wu+YOHPh0Pj90y7FVlsQrtT3y/n9+Eyv19ee7JQsGAMLa5VjyKLTWhrfUbAOYfW5An0+f1vfszrzVyHd/rZAhjGKbJ0+mcL/D7tYuIbqferwTBekcUmtDvpPXB+v5pf++R9X7ztyWGCBWNoWcGgxWIT2go67l/7WQBfo/B1Qeb38huvUaDFtZqoyGIT2go6zoicQ2vyBPrhPFw7LSO+IzLXoeoafC09QGNoca0mKbLYhLaCjjMi59CaPIF+OO/I2Ofr2qMy16EiHayLMbS4VkaRxSa0FXScETmH1uTJUZT70bF/JPEdk7kOFelgXYyhxbWarMhiE9oKOs6InENr8gT64bxjY/9o4jsucx0q0sG6GEOLa2UVWWxCW0HHGZFzaE2eQD+cd3zsH0t8fZnrUJEO1sW4j3wAwxRFFpvQVtBxRuQcWpMn0A/n4TuhxxOfy1yHinSwLsbQ4lpNVWSxCW0FHWdEzqE1eQL9cN7K2O8nvlWZ61CRDtbFGFpcq2mKLDa
hraDjjMg5tCZPoB/OOyH2VxLfiZnrUJEO1sUYWlyr6YosNqGtoOOMyDm0Jk+gH847KfZPIL6TM9ehIh2sizG0uFYzFFlsQltBxxmRc2hNnkA/nHdK7J9EfKdmrkNFOlgXY2hxrQ5SZLEJbQUdZ0TOoTV5Av1w3mmxfwrxnZ65DhXpYF2MocW1OliRxSa0FXScETmH1uQJ9MN5Z8T+acR3ZuY6VKSDdTGGFtfqEEUWm9BW0HFG5BxakyfQD+etjv0ziG8gcx0q0sG6q4UG12q2IotNaCvoOK4tWpMnzHJ2XpZVgeWsYbCcTSzn5GWpv6/eyrxmWGMN8SNXsFs63qLc1uTlqPf5OV2Da4ox8xXWsc1qaG71CLPw4+FZeiyrbEJbQccZkXNoTY917Mm5sX8O8Z2Xl6/25FzBgjG0uFYtRRab0FbQcUbkHFqTJ8xyQVaWlfX3F88fBssFxHJhVpb289ZFpAUu6Fg6zvvgorwc9Z68UOSPMfMV1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1sJaWAtrYS2shTU/q6G5c0eYxRLD+WosK+vfcyW1FXJ2RuQcWtP3RNiTi2P/QuK7JC9f7cnFggVjaHGtWoosNqGtoOOMyDm0Jk+Y5bKsLK7+HtGlw2C5jFjWZmVpf4/octICF3QsHed9cHlejnpPrhX5Y8x8hbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1sI6WlgNzV08wiz8Xvylaiyu/hxGaivk7IzIObSm99nZk3Wxv5b4rsjLV3uyTrBgDC2uVUuRxSa0FXScETmH1uQJs1yVl6X+3SZXDoPlKmJZn5elL6yxgbTABR1Lx3kfbMjLUe/J9SJ/jJmvsI5tVkNz60aYhR+7rtRjqX8PidRW0HFG5Bxa0+MSe7Ix9tcT39V5+WpPNgoWjKHFtWopstiEtoKOMyLn0Jo8YZZrsrK0/4b0pmGwXEMs12ZlaT9vXUda4IKOpeO8D67Ly1HvyWtF/hgzX2EtrIW1sBbWwlpYC2thHdushuY2jjALX8tsUmNp/71nqa2QszMi59CarlPYk+tj/1riuyEvX+3J9YIFY2hxrVqKLDahraDjjMg5tCZPmGVzVpb2deyNw2DZTCxbsrK0r2NvIq0b4y10LB3nfXBTXo56T24R+WPMfIW1sBbWwlpYC2thLayFdWyzGpq7foRZ+FrmRjWW9nWs1FbI2RmRc2hN1ynsydbY30J82/Ly1Z5sFSwYQ4tr1VJksQltBR1nRM6hNXnCLLcosNw8DJZbiOXWvCz1dex20gIXdCwd532wPS9HvSdvFfljzHyjhdXQ3NYRZuH72M16LM4mtDV0jMg5tKb7D3uyI/ZvJb7b8vLVnuwQLBhDi2vVUmSxCW0FHWdEzqE1ecIstyuw7BwGy+3EsisvS/34upu0wAUdS8d5H+zOy1HvyV0if4yZb7SwGprbMcIsfB/bqcdSP75KbQ0dI3IOren+w57cEfu7iO/OvHy1J3cIFoyhxbVqKbLYhLaCjjMi59CaPIF+OG9P7N9BfHsz16EiHayLMbS4Vi1FFpvQVtBxRuQcWpMn0A/n7Yv9PcS3P3MdKtLBuhhDi2tlFFlsQltBxxmRc2hNnuyn3O+K/X3Ed3fmOlSkg3UxhhbXarIii01oK+g4I3IOrckT6Ifz7on9u4jv3sx1qEgH62IMLa6VVWSxCW0FHWdEzqE1eQL9cN59sX8P8d2fuQ4V6WBdjO8nH8AwRZHFJrQVdJwROYfW5An0w3kPxP59xPdg5jpUpIN1MYYW12qqIotNaCvoOCNyDq3JE+iH8x6K/QeI7+HMdahIB+tiDC2u1TRFFpvQVtBxRuQcWpMn0A/nPRL7DxHfo5nrUJEO1sUYWlyr6YosNqGtoOO4tmhNnkA/nPdY7D9CfI9nrkNFOlgXY2hxrVqKLDahraDjjMg5tCZPoB/OeyL2HyO+JzPXoSIdrIs
xtLhWLUUWm9BW0HFG5BxakyfQD+c9FftPEN/TmetQkQ7WxRhaXKuWIotNaCvoOCNyDq3JE+iH856J/aeI79nMdahIB+tiDC2uVUuRxSa0FXScETmH1uQJ9MN5z8X+M8T3fOY6VKSDdTGGFteqpchiE9oKOs6InENr8oRZDuRlqb8L/kJc6znSeTFzbSvSwboYQ4vrf0CRxSa0FXScETmH1uQz9MN5L8X+C8T3cuY6VKSDdTGGFtfqgCKLTWgr6Dgjcg6tyRPoh/Neif2XiO/VzHWoSAfrYgwtrtUBRRab0FbQcUbkHFqTJ9AP570W+68Q33sy16EiHayLMbS4Vi1FFpvQVtBxRuQcWpMn0A/nvTf2XyO+92WuQ0U6WBdjaHGtWoosNqGtoOOMyDm0Jk+gH857f+y/l/g+kLkOFelgXYyhxbVqKbLYhLaCjjMi59CaPIF+OO/12H8/8X0wcx0q0sG6GEOLa9VSZLEJbQUdZ0TOoTV5Av1w3odi/3Xi+3DmOlSkg3UxhhbXqqXIYhPaCjrOiJxDa/IE+uG8j8T+h4jvo5nrUJEO1sUYWlyrliKLTWgr6Dgjcg6tyRPoh/M+FvsfIb6PZ65DRTpYF2Noca1aiiw2oa2g47i2aE2efDzehvPeiP2PEd8PZa5DRTpYF2Noca1aiiw2oa2g44zIObQmT6AfzvtE7L9BfD+cuQ4V6WBdjKHFtWopstiEtoKOMyLn0Jo8gX4475Ox/wni+zeZ61CRDtbFGFpcq5Yii01oK+g4I3IOrckT6IfzfiT2P0l8/zZzHSrSwboYQ4tr1VJksQltBR1nRM6hNXkC/XDej8b+jxDfv8tch4p0sC7G0OJatRRZbEJbQccZkXNoTZ5AP5z3Y7H/o8T37zPXoSIdrIsxtLhWLUUWm9BW0HFG5BxakyfQD+d9KvZ/jPj+Q+Y6VKSDdTGGFteqpchiE9oKOs6InENr8gT64bwfj/1PEd9/zFyHinSwLsbQ4lq1FFlsQltBxxmRc2hNnkA/nPfp2P9x4vtPmetQkQ7WxRhaXKuWIotNaCvoOCNyDq3JE2b5TF6W+m87/MQwWD5DLJ/Ny1L/n8HPkRa4oGPpOO+Dz+XlqPfkZ0X+GDNfYR3brIbmPj3CLPzY9RN6LPXfdpDaCjrOiJxDa3pcYk8+H/ufJb7/nJev9uTzggVjaHGtWoosNqGtoOOMyDm0Jk+Y5aeysrj6e1A/OQyWnyKWL2RlaT9v/TRpgQs6lo7zPvjpvBz1nvyCyB9j5iushbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1tHCamju8yPMwu/F/6Qai6t/p7vUVsjZGZFzaE3vs7MnX4z9LxDff8nLV3vyRcGCMbS4Vi1FFpvQVtBxRuQcWpMnzPKzWVnaf5vsZ4bB8rPE8qWsLO3PYX6OtMAFHUvHeR/8XF6Oek9+SeSPMfMV1sJaWAtrYS2shbWwFtaxzWpo7osjzMLXMj+jxtL+22RSWyFnZ0TOoVViPEB99uTLsf8l4vuveflqT74sWDCGFteqpchiE9oKOs6InENr8oRZfiErS/s69ueHwfILxPKVrCzt69hfJC1wQcfScd4Hv5iXo96TXxH5Y8x8hbWwFtbCWlgLa2EtrIV1bLMamvvyCLPwtczPq7G0r2OltkLOzoicQ2u6TmFPvhr7XyG+/5aXr/bkq4IFY2hxrVqKLDahraDjjMg5tCZPmOWXs7K0r2N/aRgsv0wsX8vK0r6O/RXSAhd0LB3nffAreTnqPfk1kT/GzFdYC2thLayFtbAW1sJaWMc2q6G5r44wC1/L/JIaS/s6Vmor5OyMyDm0pusU9uTrsf814vvveflqT74uWDCGFteqpchiE9oKOs6InENr8oRZfi0vS/17yX51GCy/RizfyMtSX8f+OmmBCzqWjvM++PW8HPWe/IbIH2PmK6xjm9XQ3NdHmIUfu35Vj6X+vWRSW0HHGZFzaE2PS+zJN2P/G8T
3G3n5ak++KVgwhhbXqqXIYhPaCjrOiJxDa/KEWX4zL0v9vPWtYbD8JrF8Oy9L/bz1W6QFLuhYOs774LfyctR78tsif4yZr7CObVZDc98cYRZ+7PqWHkv9vCW1FXScETmH1vS4xJ58J/a/TXz/Iy9f7cl3BAvG0OJatRRZbEJbQccZkXNoTZ4wy+8osPz2MFh+h1i+m5elft76HmmBCzqWjvM++F5ejnpPflfkj/H3aH60sBqa+84Is/B97Lf1WJxNaGvocG3Rmu4/34u34bzvx/53ie9/5uWrPfm+YMEYWlyrliKLTWgr6Dgjcg6tyRPoh/N+N/a/T3z/K3MdKtLBuhhDi2vVUmSxCW0FHWdEzqE1eQL9cN7vxf7vEt//zlyHinSwLsbQ4lq1FFlsQltBxxmRc2hNnkA/nPf7sf97xPd/MtehIh2sizG0uFYtRRab0FbQcUbkHFqTJ9AP5/1B7P8+8f3fzHWoSAfrYgwtrlVLkcUmtBV0nBE5h9bkCfTDeX8Y+39AfP8vcx0q0sG6GEOLa9VSZLEJbQUdZ0TOoTV5Av1w3h/F/h8S3//PXIeKdLAuxtDiWrUUWWxCW0HHGZFzaE2eQD+c98ex/0fE9yeZ61CRDtbFGFpcq5Yii01oK+g4I3IOrckT6Ifz/jT2/5j4/ixzHSrSwboYQ4tr1VJksQltBR1nRM6hNXkC/XDen8f+nxLfX2SuQ0U6WBdjaHGtWoosNqGtoOOMyDm0Jk+gH877y9j/c+L7q8x1qEgH62IMLa5VS5HFJrQVdJwROYfW5An0w3l/Hft/SXx/k7kOFelgXYyhxbVqKbLYhLaCjjMi59CaPIF+OO9vY/+vie/vMtehIh2sizG0uFYHFFlsQltBxxmRc2hNnkA/nPf3sf+3xPcPmetQkQ7WxRhaXKsDiiw2oa2g44zIObQmT6AfzvvH2P974vunzHWoSAfrYgwtrtUBRRab0FbQcUbkHFqTJ9AP570Z+/9IfDg5E1/tyZuC5U0ByrVqKbLYhLaCjjMiZ5IYVAd5MJxXxf6bVIduBU+qajALxt3kyZsJT7oVPJHaCjrOiJyp7IPqgNZNnvTEfkV84xQ86RGeYDyOPAEDezJOwROpraDjjMh5KE/GkSfjY7+H+CYoeDJeeILxBPKkJ+HJBAVPpLaCjjMi56E8mUCeTIz98cQ3ScGTicITjCeRJ+MTnkxS8ERqK+g4I3IeypNJ5ImJ/YnEN1nBEyM8wXgyeTIx4clkBU+ktoKOMyLnoTyZTJ5Y9IlvioInVniC8RTyxCQ8maLgidRW0HFG5DyUJ1PIk6moCfFNU/BkqvAE42nkiU14Mk3BE6mtoOOMyHkoT6aRJ9NjfyrxzVDwZLrwBOMZ5MnUhCczFDyR2go6zoich/JkBnlyUOxPJ76DFTw5SHiC8cHkyfSEJwcreCK1FXScETkP5cnB5MkhsX8Q8c1U8OQQ4QnGM8mTgxKezFTwRGor6Dgjch7Kk5nkyaGxfwjxzVLw5FDhCcazyJNDEp7MUvBEaivoOCNyHsqTWeTJ7Ng/lPjmKHgyW3iC8Rzy5NCEJ3MUPJHaCjrOiJyH8mQOeTI39mcT3zwFT+YKTzCeR57MTngyT8ETqa2g44zIeShP5pEn82N/LvEtUPBkvvAE4wXkydyEJwsUPJHaCjrOiJyH8mQBebIw9ucT3yIFTxYKTzBeRJ7MT3iySMETqa2g44zIeShPmKU3L0v9/20XD4Oll1iW5GWp/9/SUhIH1xLaB0sT+2Cpwp5cIvYkxsxXWMc2q6G5hdXIsvBj12I9lvr/20ptBR1nRM6hNT0usSeHiT0T+A5X8OQw4QnGh5MnSxJ7NTeLTWgr6Dgjch7KE2ZZnpXF1b+nd9kwWJYTy4q8damft44gcXCtoH1wRGIfHKGwJ1eIPYkx8xXWwlpYC2thLayFtbAW1sJaWAtrYS2shbWwFtbCWlgL62hhNTSH96dHioXfi1+mxuLqvzMhtRVydkb
kHJoYDnqfnT05UuyZwHeUgidHCk8wPoo8WZHYq7lZbEJbQccZkfNQnjDLMVlZ2n8v8ehhsBxDLMfmrUv9OcxxJA6uY2kfHJfYB8cp7MljxZ7EmPkKa2EtrIW1sBbWwlpYC+vYZjU0h9f3I8XC1zJHq7G0/16i1FbI2RmRc2hiOOg6hT05XuyZwNen4MnxwhOM+8iTYxN7NTeLTWgr6Dgjch7KE2ZxmfdhYOkfBosjlpV561Jfx64icXCtpH2wKrEPVinsyZViT2LMfIW1sBbWwlpYC2thLayFdWyzGprD6/uRYuFrmX41lvZ1rNRWyNkZkXNoYjjoOoU9OUHsmcB3ooInJwhPMD6RPFmZ2Ku5WWxCW0HHGZHzUJ4wy8mZ92FgOWkYLCcTyyl561Jfx55K4uA6hfbBqYl9cKrCnjxF7EmMma+wFtbCWlgLa2EtrIW1sI5tVkNzeH0/Uix8LXOSGkv7OlZqK+TsjMg5NDEcdJ3Cnpwm9kzgO13Bk9OEJxifTp6cktiruVlsQltBxxmR81CeMMuZeVnq30t2xjBYziSW1XlZ6uvYARIH12raBwOJfTCgsCdXiz35g1x5vrCOaVZDc6dVI8vCj11n6LHUv5dMaivoOCNyDq3pcYk9OUvsmcB3toInZwlPMD6bPFmd2Ku5WWxCW0HHGZHzUJ4wS0vheeucYbC0iGVNXpb6eetcEgfXGtoH5yb2wbkKe3KN2JMYM19hHdushubOqkaWhR+7ztFjqZ+3pLaCjjMi59CaHpfYk/PEngl85yt4cp7wBOPzyZM1ib2am8UmtBV0nBE5D+UJs1yowHLBMFguJJaL8rLUz1sXkzi4LqJ9cHFiH1yssCcvEnsSY+YbLayG5s6rRpaF72MX6LE4m9DW0DEi59Ca7j/sySVizwS+SxU8uUR4gvGl5MlFib2am8UmtBV0nBE5D+XJpeTJZbF/CfGtVfDkMuEJxmvJk0sSnqxV8ERqK+g4I3IeypO15MnlsX8Z8a1T8ORy4QnG68iTyxKerFPwRGor6Dgjch7Kk3XkyRWxfznxXangyRXCE4yvJE8uT3hypYInUltBxxmR81CeXEmeXBX7VxDfegVPrhKeYLyePLki4cl6BU+ktoKOMyLnoTxZT55siP2riG+jgicbhCcYbyRPrkp4slHBE6mtoOOMyHkoTzaSJ1fH/gbi26TgydXCE4w3kScbEp5sUvBEaivoOCNyHsqTTeTJNbF/NfFdq+DJNcITjK8lT65OeHKtgidSW0HHGZHzUJ4wy3V5WfrCmtdnzi+scQMlhFyvIz9x/HrK7QaFvXWd2FsYM987ZZ3ZNbKsWv7fqLC/bxjG/r6R8tucmWWKX2Mi+bpZ+AvObuK5KbE38Lgz2ceW6u0/h/44Os6PVdsU9svWuGYVY5vgCro3K+hCZ3zUBQe0euhnvjW5fTu1q/2cgvlDqTa3ZmYMOrcMY//dSvXanvn+FdbYQeLg2k6PLzi+jTh2KPi2XTy+YLyDWNBwf1CoSX+TF9sTLNv0/Bk2y5YOYpne1TksUzqIxXQQy4QOYunpIJapHcQyuYNYJnYQy7gOYpnTQSyzO4hlVgexTOsgFttBLJM6iGV8B7FUI8xiut5+XWDo+Bb6ObxGvoXmuhPr4Tl2B10jfXra29fRzp11BmgMrcnEsEP5Ne07YRnfQSyTOojFdhDLtA5imdVBLLM7iGVOB7GM6yCWiR3EMrmDWKZ2EEtPB7FM6CAW00EsUzqIZXoHseC1YiewbOsglu6ER7flZVnFr9nRxHDQ6//biGVnXpb687TbM68Z1thFCSFXsFs6fjvltkvB853V4JpizHyFdWyz5tddWf8utZ3DuI8zy26F++MdJA6u3eTFHQkv7lDwYrfwAmPmK6yFtbAW1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sBbWwlpYC2thLayFtbAW1sJaWAtrftb8uq7+fgTrhiaGP9CSLHfmrUH9/Yg9JA6uO8mLPQk
v9ih4cafwAmPmK6yFtbAW1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sBbW0cKqoFv/H1vWDU0MB72/zCx787LU7y/vI3Fw7SUv9iW82KfgxV7hBcbMV1jHNmt+3f768569w7g/Mst+hfvjXSQOrv3kxV0JL+5S8GK/8AJj5iushbWwFtbCWlgLa2EtrGObNb9u+/U564YmhoNenzPL3XlrUL8+v4fEwXU3eXFPwot7FLy4W3iBMfMV1sJaWAtrYS2shbWwFtaxzaqgW/+NQtYNTQwHvT5nlnvzstSvz+8jcXDdS17cl/DiPgUv7hVeYMx8o4VVQbfeN/cOY98wy/0K++YBEgfX/eTFAwkvHlDw4n7hBcbMN1pYDc11d701h+M9NPdgnBtHcw/FufE09zDlhLlH4txEmns0zk2iucfi3GyaezzO8d+eeSL2d9Lck7G/m+aeiv07ae7p2N9Lc8/E/n6ae1Y8T4S558RjQJh7XngZ5g5QH7cvxLnJNPci7QnMvRTnptDcy3FuKs29Euem0dyrcW46zb2W4IPX99McvOa9Aa8fpDl4/RDNweuHaQ5eP0Jz8PpRmkONHqM51OhxmkONnqA51OhJmkONnqI51OhpmkONnqG5GXHuWZo7KM49R3MHx7nnae6QOMfe4+8ov0Bz+HuwL9Ic/sbLSzSH+8DLNIe/efIKzc2Nc6/S3Lw49xrd74JHi+P8QLzte3etfo7o7Rrcmp4joB9YFuVlqd+HXBDX6iWd+Xl16sfVBSI/jKFliWGRIotNaOfXcX2cc3fMbZbQ5X23gFiW5s3ZBZa5tH4v6UKrh37mM/EBITyWbZr21nmZ9199X8D+Rmu6L+jtCzfovvBOWHjvzMvK0v47EXMzrxnWWEr8yBXslo7zXsm8F+vHgnldg2sq92JhLayFtbAW1sJaWAtrYS2shbWwFtbCWlgLa2EtrIW1sI4mVn6vf/EIs1hi0PvcwfXZhLbG++X8OR/WDp9drKHPLhZkzi3UmT+P6CUGaPXQz9w97S2uC2J/Mh3nPZGZ9SaFmtef3eAzELSmz0vmUX55P9twfeEz7YlUy7nCB3B2E8NhWRnan68cTvlCEzrBa9wH8HOG+r3EtiwvW/2YwGwDNIYWf/66VJHFJrT5c8ZQJ9wXllDtFiXqlPczwP539Rlg7s/rKuHDAGmwbuZ93M+6VQxoYL6H+rfhiyL0c6HBQzAHD+clfo77C8Q5lo7PU855LnEM0BhaYY/eSLnOS3DPIm4c5+eIzI/BNTc/BvcSA7T4+y5LiEXjdce/VEN+3bFE1Cw/S3/9ukNq83eGULPg6RvEofkaoVv4s4C4wIOfla/heuln5tB5yFHxvuHk41Bv19vvG/x48Ai9zvnUEK9zMn+3x/F3biQrP2ZqvuYO3/VjDn7MxM88HesyNfIcnrkOpmvw42pXV/Pz2OFUm8zP9/XrouWkBS5+3YHjM4ljeV6O2qNlIn+Mme+dsi7sANZlCdbU8+0yNdb296aYI7SmvbacWI7IytJ+zXQkrT9AGqx7VF7dftbFayZoYL6H+j9MryOOeqv7g8dIMAcPVyR+jvvLxDmWjq9QzvkI4higMbTCc8DrlOuKBPcc4sZxfu0C3/h5Y4VCLstFLssFM79nskyNpf2eidTm59L8r1Pa+S/qeqv1dr39vQx+j2URcfF7LOMzc/H36dGaHlugH74zj++57923e8/mW7Zt2LN937aK1hgn1uumdbrpWI/4uYldb2fIlvBMEuuO4uNiYhOieEgOX/i3MdHwQiL4EL6gH76QH76AH75wH75gP5M4X4i34Qv14YVo+MJ8MDU8aYQ7XXiRFAwPT27B5LDhe7vaL9rDC5nwJBNeLIQ7QNikYYOGO314YAt3+qN9HOPjWB/H+Tg+1MRHvw/nY6WPVT5O8HGij5N8nOzjFB+n+jjNx+k+zvBxpo/VsbZn+Tjbxzk+Wj7W+DjXx3k+zvdxgY8LfVzk42Ifl/i41MdlPtb
6uNzHOh9X+LjSx1U+1vvY4GOjj6t9bPJxjY9rfVzn43ofN/i40cdmH1t83ORjq49tPm72cYuPW31s97HDx20+dvq43ccuH7t93OHjTh97fOz1sc/Hfh93+bjbxz0+7vVxn4/7fTzg40EfD/l42McjPh718ZiPx3084eNJH0/5eNrHMz6e9fGcj+d9HOhq+/yij5d8vOzjFR+v+njNx3t8vNfH+3y838cHfLzu44M+PuTjwz4+4uOjPj7m4+Nd7YskvlOg/Ub8Xyenx/EV7Ttb796du/f19vXu8v9u3rlz993bth7Xy8f29t6+f+++3r37Nu/Z13vznt239/Yfx+s+FR9U8B9ELti1dds9vbv37+vdfXPvlt37d23d+8/h50dT8JUCAA==", "verificationKey": "0000000200000800000000740000000f00000003515f3109623eb3c25aa5b16a1a79fd558bac7a7ce62c4560a8c537c77ce80dd339128d1d37b6582ee9e6df9567efb64313471dfa18f520f9ce53161b50dbf7731bc5f900000003515f322bc4cce83a486a92c92fd59bd84e0f92595baa639fc2ed86b00ffa0dfded2a092a669a3bdb7a273a015eda494457cc7ed5236f26cee330c290d45a33b9daa94800000003515f332729426c008c085a81bd34d8ef12dd31e80130339ef99d50013a89e4558eee6d0fa4ffe2ee7b7b62eb92608b2251ac31396a718f9b34978888789042b790a30100000003515f342be6b6824a913eb7a57b03cb1ee7bfb4de02f2f65fe8a4e97baa7766ddb353a82a8a25c49dc63778cd9fe96173f12a2bc77f3682f4c4448f98f1df82c75234a100000003515f351f85760d6ab567465aadc2f180af9eae3800e6958fec96aef53fd8a7b195d7c000c6267a0dd5cfc22b3fe804f53e266069c0e36f51885baec1e7e67650c62e170000000c515f41524954484d455449430d9d0f8ece2aa12012fa21e6e5c859e97bd5704e5c122064a66051294bc5e04213f61f54a0ebdf6fee4d4a6ecf693478191de0c2899bcd8e86a636c8d3eff43400000003515f43224a99d02c86336737c8dd5b746c40d2be6aead8393889a76a18d664029096e90f7fe81adcc92a74350eada9622ac453f49ebac24a066a1f83b394df54dfa0130000000c515f46495845445f42415345060e8a013ed289c2f9fd7473b04f6594b138ddb4b4cf6b901622a14088f04b8d2c83ff74fce56e3d5573b99c7b26d85d5046ce0c6559506acb7a675e7713eb3a00000007515f4c4f4749430721a91cb8da4b917e054f72147e1760cfe0ef3d45090ac0f4961d84ec1996961a25e787b26bd8b50b1a99450f77a424a83513c2b33af268cd253b0587ff50c700000003515f4d05dbd8623b8652511e1eb38d38887a69eceb082f807514f09e127237c5213b401b9325b48c6c225968002318095f89d0ef9cf629b2b7f0172e03bc39aacf6ed800000007515f52414e474504b57a3805e41df328f5ca9aefa40fad5917391543b7b65c6476e60b8f72e9ad07
c92f3b3e11c8feae96dedc4b14a6226ef3201244f37cfc1ee5b96781f48d2b000000075349474d415f3125001d1954a18571eaa007144c5a567bb0d2be4def08a8be918b8c05e3b27d312c59ed41e09e144eab5de77ca89a2fd783be702a47c951d3112e3de02ce6e47c000000075349474d415f3223994e6a23618e60fa01c449a7ab88378709197e186d48d604bfb6931ffb15ad11c5ec7a0700570f80088fd5198ab5d5c227f2ad2a455a6edeec024156bb7beb000000075349474d415f3300cda5845f23468a13275d18bddae27c6bb189cf9aa95b6a03a0cb6688c7e8d829639b45cf8607c525cc400b55ebf90205f2f378626dc3406cc59b2d1b474fba000000075349474d415f342d299e7928496ea2d37f10b43afd6a80c90a33b483090d18069ffa275eedb2fc2f82121e8de43dc036d99b478b6227ceef34248939987a19011f065d8b5cef5c0000000010000000000000000100000002000000030000000400000005000000060000000700000008000000090000000a0000000b0000000c0000000d0000000e0000000f" } - ] + ], + "events": [] } diff --git a/yarn-project/aztec.js/src/abis/schnorr_single_key_account_contract.json b/yarn-project/aztec.js/src/artifacts/schnorr_single_key_account_contract.json similarity index 99% rename from yarn-project/aztec.js/src/abis/schnorr_single_key_account_contract.json rename to yarn-project/aztec.js/src/artifacts/schnorr_single_key_account_contract.json index 6f79711a6ab..99eca3c3032 100644 --- a/yarn-project/aztec.js/src/abis/schnorr_single_key_account_contract.json +++ b/yarn-project/aztec.js/src/artifacts/schnorr_single_key_account_contract.json @@ -617,5 +617,6 @@ "bytecode": 
"H4sIAAAAAAAA/+2dCZglVXXHX3XPTM/tOxswzMYsPcMsLALdBYgISAMPZJN9k02GmWFzFpwFEBARERFBREREREBjjDHGGLe4xS1ucYtb3KJijDHGmEUNCVEx59a7f+ffdyoVWu75+vXX537foe7y6v5/55xb9arqFdNvbLVaRatTesVmt3YsGB+O28EnV4aKfHMNanL2jBPO3nHCOWmccE4eJ5xTxgln3zjhnDpOON044ewfJ5x+nHBOGyec08cJ54xxwjlznHDOGiecO40Tzp3HCecu44RzdkbO+cS5a9zOidu5cTsvbvHZBXG7W9wujD5Oiu1FYovFlogNxDEEZKnYMrHdxZaLrRBbKbZKbA+xPcX2Ettb7Cli+4jtK7ZfnGNIrBTbX+wAsQPFnip2kNjTxA4We7rYIWKHih0m9gyxw2PMjhA7UuwosbbY0WLHiD1T7Fix48SOFztB7ESxZ4mdJHay2CnRl4Hoy6lip4mdLnaG2JliZ4mdLXaO2LPFzhU7T+x8sQvELhR7jthFYqvFLhZbI7ZWbJ3YJWKXil0mdrnYFWLPFVsvtkFso9gmsSuTmD9PbLPYFrGtcWxWHNsmdpXY1WLXiD1f7Fqx68SuF3uB2A1iLxS7UexFYjeJvVjs5mSul4jdIvZSsVvFXiZ2m9jLxW4Xu0PsFWJ3ir1S7C6xV4ndLfbqOFdPnOsesdckffeKvTbW74vb18Xt/XH7+rh9IG7fELcPxu1Dcftwa3u50Xe24ZoT63yX1vY+PC/YmfowvhP1YXwW9WF8JvVhfAb1YXw69WF8GvVh3FMfxvupj8exxfhU6sN4H/VhfAr1YXwy9WF8EvVhvJf6MN5DfRgvqA/jrUQ/lOG4HXySZUor+zl3MPg8RH60avzl50+pv5Nq4jK5Jn6cD4xz3jDO+eVxbDHO6wXjvG4wzusP47xOMc7rGeO87jHOxwfG+TjCOB9vGOfjEuOzqQ/ju1IfxudQH8bnUh/G51EfxudTH8bxXRf8mkLt4bgdfHKldKSLUiTtYapDP7AsVGDZbRQsCylei2J9V+JbrMC3JM41n3QG8upUz3WXJD6jDS1PDIsVWXyNtoLOiNiiNOV+gFiW5mWpLhWXkRa4llLsMT6bOJZljn1BmpgXbeZ7oqyLxpjVUd9uY8ziiWEJ9eFzA3p8pUv4Qmla68uIZXlWlqHBwLL7KFiWE8uKrCyd66aVmecMc6wifvgKdk/jK8m3VXk5qjW5ojUypmgzn7Eaq7Eaq7Eaq7Eaq7FObFZHfUvHmIXvj3ZXYxka9DXaGvcb/DwOc4fnoA+T5pLMvvHvN3iOAgZo9dJnNvRv53pz7Oun8aVqrOWaMOeirHN27n3xvAql6X6Tn1nkfd44NBieKfdRLBcmeQBnDzFkfv5a3Z/y81doQifkGs8p+PkwnhljLKyPd+px/t7PiV0Nf/5cds5fuyUs/PtwKPzMtEeRxddod+vzcn4Ol/l5eZWTRQkL2tDiWPUqsvgaba3fCNjnUJpyAn1+/s3nvIHMcXiivy2AYZIiS7f/tsDPaZcQ31g9rwbDZEUWX6PdTc/A+Xk1X/vkfS7eycnuCQva0PIJgxaLr9FW0Cl/398C+B6F7wsyP8tvvEeBFseqT5HF12gr6JQu8TmUppxAP+yHe6cVxLdH5jgUrZH30sPUhhbHaqoii6/RVtApXeJzKE05gX7Yb89Y5/vavTLHoSAdzIs2tDhWTpHF12gr6JQu8TmUppzsRb7vHet7Et9TMsehIB3Miza0OFb9iiy+RltBp3SJz6E05QT6Yb99Yn1v4ts3cxwK0sG8aEOLY+UVWXyNtoJO6RKfQ2nKCfTDfvvF+j7EN5g5DgXpYF60BykPYJimyOJrtBV0Spf4HEpTTqAf9sM7ofsRX5k5DgXpYF60ocWxmq7I4mu0FXRKl/gcSlNOoB/22z/Wh4jvgMxxKEgH86INLY7VDEUWX6OtoFO6xOdQmnIC/bDfgbG+P/E9NXM
cCtLBvGhDi2M1U5HF12gr6JQu8TmUppxAP+x3UKwfSHxPyxyHgnQwL9rQ4ljNUmTxNdoKOqVLfA6lKSfQD/sdHOsHEd/TM8ehIB3Miza0OFY7KbL4Gm0FndIlPofSlBPoh/0OifWDie/QzHEoSAfzog0tjtXOiiy+RltBp3SJz6E05QT6Yb/DYv0Q4ntG5jgUpIN50YYWx2oXRRZfo62gU7rE51CacgL9sN/hsX4Y8Q1njkNBOpj38ESDYzVXkcXXaCvolBxblKacMMuReVkOCCxHjILlSGI5Ki9L9b56O/OcYY6jiR++gt3TeJt8OzovR7XOj2qNjCnazGesE5vVUd/hY8zC58Mj9FgO8DXaCjqlS3wOpelcxzk5JtaPIr5n5uWrcnJMwoI2tDhWbUUWX6OtoFO6xOdQmnLCLMdlZdm/en/x2FGwHEcsx2dl6XxvnUBa4IKOp3FeByfk5ajW5PGJ/2gzn7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7HmZ3XUd8wYs3hiOFaNZf/q37lKtRV8Ll3icyhN74lwTk6M9eOJ71l5+aqcnJiwoA0tjlVbkcXXaCvolC7xOZSmnDDLyVlZyuo9opNGwXIysZySlaXzHtGppAUu6Hga53Vwal6Oak2ekviPNvMZq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7GOF1ZHfSeOMQs/iz9JjaWsfodJtRV8Ll3icyhNz9k5J6fF+inEd3pevionpyUsaEOLY9VWZPE12go6pUt8DqUpJ8xyZl6W6t82OWMULGcSy1l5WQbDHGeTFrig42mc18HZeTmqNXlW4j/azGesE5vVUd9pY8zC564z9Fiqf4ck1VbQKV3icyhN5yXOyTmxfhbxPTsvX5WTcxIWtKHFsWorsvgabQWd0iU+h9KUE2Y5LytL529InzsKlvOI5fysLJ3vrQtIC1zQ8TTO6+CCvBzVmjw/8R9t5jNWYzVWYzVWYzVWYzXWic3qqO+cMWbhe5lz1Vg6f+851VbwuXSJz6E03adwTi6M9fOJ7zl5+aqcXJiwoA0tjlVbkcXXaCvolC7xOZSmnDDL6qwsnfvYi0bBsppYLs7K0rmPXUNaF8UtdDyN8zpYk5ejWpMXJ/6jzXzGaqzGaqzGaqzGaqzGOrFZHfVdOMYsfC9zkRpL5z421VbwuXSJz6E03adwTtbG+sXEty4vX5WTtQkL2tDiWLUVWXyNtoJO6RKfQ2nKCbNcqsByyShYLiWWy/KyVPexl5MWuKDjaZzXweV5Oao1eVniP9rMN15YHfWtHWMWPsYu0WMpfY22ho5LfA6l6fjhnFwR65cR33Pz8lU5uSJhQRtaHKu2Iouv0VbQKV3icyhNOWGWDQos60fBsoFYNuZlqc6vm0gLXNDxNM7rYFNejmpNbkz8R5v5xguro74rxpiFj7H1eizV+TXV1tBxic+hNB0/nJMrY30j8T0vL1+VkysTFrShxbFqK7L4Gm0FndIlPofSlBPoh/02x/qVxLclcxwK0sG8aEOLY9VWZPE12go6pUt8DqUpJ9AP+22N9c3Ety1zHArSwbxoQ4tj5RRZfI22gk7pEp9DacrJNvL9qljfSnxXZ45DQTqYF21ocaz6FVl8jbaCTukSn0Npygn0w37XxPpVxPf8zHEoSAfzog0tjpVXZPE12go6pUt8DqUpJ9AP+10b69cQ33WZ41CQDuZF+zrKAximKbL4Gm0FndIlPofSlBPoh/2uj/Vrie8FmeNQkA7mRRtaHKvpiiy+RltBp3SJz6E05QT6Yb8bYv164nth5jgUpIN50YYWx2qGIouv0VbQKV3icyhNOYF+2O/GWL+B+F6UOQ4F6WBetKHFsZqpyOJrtBV0So4tSlNOoB/2uynWbyS+F2eOQ0E6mBdtaHGs2oosvkZbQad0ic+hNOUE+mG/m2P9JuJ7SeY4FKSDedGGFseqrcjia7Q
VdEqX+BxKU06gH/a7JdZvJr6XZo5DQTqYF21ocazaiiy+RltBp3SJz6E05QT6Yb9bY/0W4ntZ5jgUpIN50YYWx6qtyOJrtBV0Spf4HEpTTqAf9rst1m8lvpdnjkNBOpgXbWhxrNqKLL5GW0GndInPoTTlhFluz8tSvQt+R5zrNtJ5RebYFqSDedGGFsf/dkUWX6OtoFO6xOdQmvIM/bDfnbF+B/G9MnMcCtLBvGhDi2N1uyKLr9FW0Cld4nMoTTmBftjvrli/k/helTkOBelgXrShxbG6XZHF12gr6JQu8TmUppxAP+x3d6zfRXyvzhyHgnQwL9rQ4li1FVl8jbaCTukSn0Npygn0w373xPrdxPeazHEoSAfzog0tjlVbkcXXaCvolC7xOZSmnEA/7HdvrN9DfK/NHIeCdDAv2tDiWLUVWXyNtoJO6RKfQ2nKCfTDfvfF+r3E97rMcShIB/OiDS2OVVuRxddoK+iULvE5lKacQD/sd3+s30d8r88ch4J0MC/a0OJYtRVZfI22gk7pEp9DacoJ9MN+D8T6/cT3hsxxKEgH86INLY5VW5HF12gr6JQu8TmUppxAP+z3YKw/QHwPZY5DQTqYF21ocazaiiy+RltBp+TYojTl5KG4Dfs9HOsPEt8bM8ehIB3Miza0OFZtRRZfo62gU7rE51CacgL9sN+bYv1h4vuDzHEoSAfzog0tjlVbkcXXaCvolC7xOZSmnEA/7PfmWH8T8f1h5jgUpIN50YYWx6qtyOJrtBV0Spf4HEpTTqAf9ntLrL+Z+P4ocxwK0sG8aEOLY9VWZPE12go6pUt8DqUpJ9AP+7011t9CfH+cOQ4F6WBetKHFsWorsvgabQWd0iU+h9KUE+iH/d4W628lvj/JHIeCdDAv2tDiWLUVWXyNtoJO6RKfQ2nKCfTDfm+P9bcR359mjkNBOpgXbWhxrNqKLL5GW0GndInPoTTlBPphv3fE+tuJ788yx6EgHcyLNrQ4Vm1FFl+jraBTusTnUJpyAv2w3ztj/R3E9+eZ41CQDuZFG1ocq7Yii6/RVtApXeJzKE05YZZ352Wp/rbDu0bB8m5ieU9elur/GXwvaYELOp7GeR28Ny9HtSbfk/iPNvMZ68RmddT3zjFm4XPXu/RYqr/tkGor6JQu8TmUpvMS5+R9sf4e4vuLvHxVTt6XsKANLY5VW5HF12gr6JQu8TmUppwwyweyspTVe1DvHwXLB4jlg1lZOt9bHyItcEHH0zivgw/l5ajW5AcT/9FmPmM1VmM1VmM1VmM1VmM1VmM1VmM1VmM1VmM1VmM11vHC6qjvfWPMws/i36/GUlb/pnuqreBz6RKfQ2l6zs45+XCsf5D4/jIvX5WTDycsaEOLY9VWZPE12go6pUt8DqUpJ8zy0awsnb9N9pFRsHyUWD6WlaXzO8zHSQtc0PE0zuvg43k5qjX5scR/tJnPWI3VWI3VWI3VWI3VWCc2q6O+D48xC9/LfESNpfO3yVJtBZ9Ll/gcSpG0h6nOOflErH+M+P4qL1+Vk08kLGhDi2PVVmTxNdoKOqVLfA6lKSfM8qmsLJ372E+OguVTxPLprCyd+9jPkBa4oONpnNfBZ/JyVGvy04n/aDOfsRqrsRqrsRqrsRqrsU5sVkd9nxhjFr6X+aQaS+c+NtVW8Ll0ic+hNN2ncE4+G+ufJr6/zstX5eSzCQva0OJYtRVZfI22gk7pEp9DacoJs3w+K0vnPvZzo2D5PLF8IStL5z72i6QFLuh4Gud18MW8HNWa/ELiP9rMZ6zGaqzGaqzGaqzGaqwTm9VR32fHmIXvZT6nxtK5j021FXwuXeJzKE33KZyTL8X6F4jvb/LyVTn5UsKCNrQ4Vm1FFl+jraBTusTnUJpywixfyctS/btkXx4Fy1eI5at5War72K+RFrig42mc18HX8nJUa/Krif9oM5+xTmxWR31fGmMWPnd9WY+l+nfJUm0FndIlPofSdF7inHw91r9KfH+bl6/
KydcTFrShxbFqK7L4Gm0FndIlPofSlBNm+WZelup76xujYPkmsXwrL0v1vfVt0gIXdDyN8zr4dl6Oak1+K/EfbeYz1onN6qjv62PMwueub+ixVN9bqbaCTukSn0NpOi9xTr4T698ivr/Ly1fl5DsJC9rQ4li1FVl8jbaCTukSn0NpygmzfE+B5bujYPkesXw/L0v1vfUIaYELOp7GeR08kpejWpPfT/xH+xHqHy+sjvq+M8YsfIx9V4+l9DXaGjocW5Sm4+eRuA37/SDWv098f5+Xr8rJDxIWtKHFsWorsvgabQWd0iU+h9KUE+iH/X4Y6z8gvn/IHIeCdDAv2tDiWLUVWXyNtoJO6RKfQ2nKCfTDfj+K9R8S3z9mjkNBOpgXbWhxrNqKLL5GW0GndInPoTTlBPphvx/H+o+I758yx6EgHcyLNrQ4Vm1FFl+jraBTusTnUJpyAv2w309i/cfE98+Z41CQDuZFG1ocq7Yii6/RVtApXeJzKE05gX7Y76ex/hPi+5fMcShIB/OiDS2OVVuRxddoK+iULvE5lKacQD/s97NY/ynx/WvmOBSkg3nRhhbHqq3I4mu0FXRKl/gcSlNOoB/2+7dY/xnx/XvmOBSkg3nRhhbHqq3I4mu0FXRKl/gcSlNOmOU/8rIMhjl/njmOYY5fED98Bbun8Z+Tb7/IzFGQJuZFm/meKOvsMWbVyv8v885ZrW+OaShN6/uX5N9/ZmaZJnP0tbbnFfOnse0hnv+iOsZx3ukXe7Tmc6hPonE+Vz2W169qvfx3nKuI9ljCEnT/R0EXOpOjLjig1Uufebivs53e6nynoH9XYvx1Zsag86vWyNK0/n5NLL/Jy1I9P32ctMAFHU/jjxHH45ljUpAm5kX7cWJB6dGLyVBTLn5Tw/JYF7E82kUsM7uIZVoXsbguYpnSRSy9XcQyvYtY+ruIpa+LWCZ1Ecu8LmKZ20Usc7qIZUYXsfguYpnaRSyTu4ilGGMW19rxvsDR+KPUh2vkX1FfT818+I7F50O8r/M7zqPtO+sMUxta/cTweBewTO4ilqldxOK7iGVGF7HM6SKWuV3EMq+LWCZ1EUtfF7H0dxHL9C5i6e0ilildxOK6iGVaF7HM7CKWR7uI5bEuYumpYfltXpYD+Jq9RUxchqn+W2LBBzOxVL+nFZnnDHP0kEO/SZz0NA7tKdSXiaNzT1UkMY1t5jPWic2aX3f/6t9SSw/qpmOcWXoVjsdJxY4gvZSLSTW5mKSQi94kF2gzn7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7Eaq7HmZ82vW1bvR7AuhWAESxqrwDI5bwyq9yOmkDi4JlMuptTkYopCLiYnuUCb+YzVWI3VWI3VWI3VWI3VWI3VWI3VWI3VWI3VWI3VWMcLq4Ju9f/Ysm7l58jmiOfLzNKXl6V6vjyVxMHVR7mYWpOLqQq56EtygTbzGevEZs2vO1T93tM3iuORWZzC8dhP4uBylIv+mlz0K+TCJblAm/mM1ViN1ViN1ViN1ViNdWKz5tftXJ+zbuXnyOaI63Nm8XljUF2fTyPx3+WAcjGtJhfTFHLhk1ygzXzGaqzGaqzGaqzGaqzGOrFZFXSrv1HIupWfI5sjrs+ZZXpelur6fAaJg2s65WJGTS5mKORiepILtJlvvLAq6FbrZvoo1g2zzFRYN7NIHFwzKRezanIxSyEXM5NcoM1844XVUV9Pa3sfxnupb6fYN4n6do59k6lvF/IJfbNjXx/17Rr7plLfnNg3l/rmxj7+2zPz4Bf5MR/M1LcAfNS3G1iobyHq1Lco+Z4IfYuTc0DoW5LkMvQNUB3bpbGvn/qW0ZpA3+74TqC+5dClvhU4B1DfSqw16ltVw4dcz6Q+5JrXBnK9E/Uh1ztTH3K9C/Uh17OpD7nelfoQoznUhxjNpT7EaB71IUbzqQ8xWkB9iNFuvO5j30LqmxX7FlHfTrFvMfXtHPuWUN8
usY9zj7+jvJT68Pdgl1Ef/sbL7tSHY2A59eFvnqzgdR/7VvK6j32r6LgLOVoS+4fjdvDJleo7YqA1sjR9R0A/sCzOy1I9h1wY5xognd3y6lTn1YWJf2hDyxPDYkUWX6OdX6ccZJ97om9zEl1edwuJZVlen8vAMp/mHyBdaPXSZ26IJ5NwLlvut++Xef1VxwLWN0rTsaC3LsoRx8ITYeG1syArS+fvRMzPPGeYYxnxw1ewexrntZJ5LVbnggWtkTFN16KxGquxGquxGquxGquxGquxGquxGquxGquxGquxGut4YuVn/UvGmMUTg97vDuWgr9HWeF7Ov/Nh7vDbxUz67WJhZt9CnPn3iAFigFYvfeYIv51rl1jvp3FeE5lZ1yjEvPrtBr+BoDT9XrKA/Mv720Y5GH7T7qNYzk/yAM4eYtg9K0Pn95Xl5C80oRNyjWMAn3NUHyC2FXnZqnMCsw1TG1r8++syRRZfo82/M4Y44VhYSrFbXBOnvL8BDj2p3wBz/15XJHkYJg3WzbyOh1i3iAYN9PdS/UC8ZEKfCwU5BHPI4YKaz3F9YbKPp/EFyj7PJ45hakMrrNG9yNcFNdxziBvj/B2R+RxccfM5eIAYoMXvuywlFo3rjv8rhnzdsTSJWX6Woeq6I9Xmd4YQs5DTh4lD8xqhJ8nPQuICDz6bXsMN0Gfm0X7wUfHYKNPz0EBrx2ODzwfH03XO1f/PdU7md3tKfucmZeVzpuY1d3jXjzn4nInPnBrjMj3yLM8ch+qdyNbI0vQ9tpxik/n7vrouWkla4OLrDozPJo6VeTmqHK1I/Eeb+Z4o66IuYF1Rw1r3fbtCjbXz3hRzhNK01lYSyx5ZWTrXTHvS/MOkwbp75dUdYl1cM0ED/b1U30jXEXttr/7uHAnmkMNVsc6f4/qKZB9P49hXy+c9iGOY2tAK3wFryNdVNdzziBvjfO2CvPH3xioFX1YmvqxMmPmZyQo1ls4zk1Sbv0vzX6d0/F/c2l4GWjs+y+BnLIuJi5+xTM7Mxe/TozSdW6Af3pnHe+5btm7avPrSdWdvvnzruoLmmJTM10Pz9NBYb/K5vtaODNkcnk1iPVF8UnRsShQPzuGF/+p/xGt1LiTCC/vhBf3wQn54AT+8cB9esJ9NnHfEbXihPlyIhhfmQ1LDl0Y46MJFUkh4+HILSQ4LfqDVuWgPFzLhSyZcLIQDICzSsEDDQR9ObOGg31vsKWL7iO0rtl+IidiQWCm2v9gBYgeKPVXsILGniR0s9nSxQ8QOFTtM7Blih8fYHiF2pNhRYm2xo8WOEXum2LFix4kdL3aC2IlizxI7SexksVPEThU7Tex0sTPEzhQ7S+xssXPEni12rth5YueLXSB2odhzxC4SWy12sdgasbVi68QuEbtU7DKxy8WuEHuu2HqxDWIbxTaJXSn2PLHNYlvEtoptE7tK7Gqxa8SeL3at2HVi14u9QOwGsReK3Sj2IrGbxF4sdrPYS8RuEXup2K1iLxO7TezlYre3Onl+hdidYq8Uu0vsVWJ3i71a7B6x14jdK/ZasfvEXid2v9jrxR4Qe4PYg2IPtTo3SXxQoDwU/4+VQ2P79M7BNrBl/aatA4MDG+W/q9ev33T1urX7DvDYloEN27ZsHdiydfXmrQOXbN60YWBoX573lHhSwf8gctzGteuuGdi0bevApksGLt60bePaLf8LdTCcQzAHAgA=", "verificationKey": 
"0000000200000800000000740000000f00000003515f3109623eb3c25aa5b16a1a79fd558bac7a7ce62c4560a8c537c77ce80dd339128d1d37b6582ee9e6df9567efb64313471dfa18f520f9ce53161b50dbf7731bc5f900000003515f322bc4cce83a486a92c92fd59bd84e0f92595baa639fc2ed86b00ffa0dfded2a092a669a3bdb7a273a015eda494457cc7ed5236f26cee330c290d45a33b9daa94800000003515f332729426c008c085a81bd34d8ef12dd31e80130339ef99d50013a89e4558eee6d0fa4ffe2ee7b7b62eb92608b2251ac31396a718f9b34978888789042b790a30100000003515f342be6b6824a913eb7a57b03cb1ee7bfb4de02f2f65fe8a4e97baa7766ddb353a82a8a25c49dc63778cd9fe96173f12a2bc77f3682f4c4448f98f1df82c75234a100000003515f351f85760d6ab567465aadc2f180af9eae3800e6958fec96aef53fd8a7b195d7c000c6267a0dd5cfc22b3fe804f53e266069c0e36f51885baec1e7e67650c62e170000000c515f41524954484d455449430d9d0f8ece2aa12012fa21e6e5c859e97bd5704e5c122064a66051294bc5e04213f61f54a0ebdf6fee4d4a6ecf693478191de0c2899bcd8e86a636c8d3eff43400000003515f43224a99d02c86336737c8dd5b746c40d2be6aead8393889a76a18d664029096e90f7fe81adcc92a74350eada9622ac453f49ebac24a066a1f83b394df54dfa0130000000c515f46495845445f42415345060e8a013ed289c2f9fd7473b04f6594b138ddb4b4cf6b901622a14088f04b8d2c83ff74fce56e3d5573b99c7b26d85d5046ce0c6559506acb7a675e7713eb3a00000007515f4c4f4749430721a91cb8da4b917e054f72147e1760cfe0ef3d45090ac0f4961d84ec1996961a25e787b26bd8b50b1a99450f77a424a83513c2b33af268cd253b0587ff50c700000003515f4d05dbd8623b8652511e1eb38d38887a69eceb082f807514f09e127237c5213b401b9325b48c6c225968002318095f89d0ef9cf629b2b7f0172e03bc39aacf6ed800000007515f52414e474504b57a3805e41df328f5ca9aefa40fad5917391543b7b65c6476e60b8f72e9ad07c92f3b3e11c8feae96dedc4b14a6226ef3201244f37cfc1ee5b96781f48d2b000000075349474d415f3125001d1954a18571eaa007144c5a567bb0d2be4def08a8be918b8c05e3b27d312c59ed41e09e144eab5de77ca89a2fd783be702a47c951d3112e3de02ce6e47c000000075349474d415f3223994e6a23618e60fa01c449a7ab88378709197e186d48d604bfb6931ffb15ad11c5ec7a0700570f80088fd5198ab5d5c227f2ad2a455a6edeec024156bb7beb000000075349474d415f3300cda5845f23468a13275d18bddae27
c6bb189cf9aa95b6a03a0cb6688c7e8d829639b45cf8607c525cc400b55ebf90205f2f378626dc3406cc59b2d1b474fba000000075349474d415f342d299e7928496ea2d37f10b43afd6a80c90a33b483090d18069ffa275eedb2fc2f82121e8de43dc036d99b478b6227ceef34248939987a19011f065d8b5cef5c0000000010000000000000000100000002000000030000000400000005000000060000000700000008000000090000000a0000000b0000000c0000000d0000000e0000000f" } - ] + ], + "events": [] } diff --git a/yarn-project/aztec.js/src/contract/checker.test.ts b/yarn-project/aztec.js/src/contract/checker.test.ts index b4ae515188c..7214f924f86 100644 --- a/yarn-project/aztec.js/src/contract/checker.test.ts +++ b/yarn-project/aztec.js/src/contract/checker.test.ts @@ -7,12 +7,12 @@ describe('abiChecker', () => { abi = { name: 'TEST_ABI', }; - expect(() => abiChecker(abi)).toThrowError('ABI has no functions'); + expect(() => abiChecker(abi)).toThrowError('artifact has no functions'); abi = { name: 'TEST_ABI', functions: [], }; - expect(() => abiChecker(abi)).toThrowError('ABI has no functions'); + expect(() => abiChecker(abi)).toThrowError('artifact has no functions'); }); it('should error if ABI has no names', () => { diff --git a/yarn-project/aztec.js/src/contract/checker.ts b/yarn-project/aztec.js/src/contract/checker.ts index b90c3845845..616b4840ae7 100644 --- a/yarn-project/aztec.js/src/contract/checker.ts +++ b/yarn-project/aztec.js/src/contract/checker.ts @@ -1,4 +1,4 @@ -import { ABIType, BasicType, ContractAbi, StructType } from '@aztec/foundation/abi'; +import { ABIType, BasicType, ContractArtifact, StructType } from '@aztec/foundation/abi'; /** * Represents a type derived from input type T with the 'kind' property removed. @@ -7,19 +7,19 @@ import { ABIType, BasicType, ContractAbi, StructType } from '@aztec/foundation/a type TypeWithoutKind = Omit<{ [key in keyof T]: any }, 'kind'>; /** - * Validates the given ContractAbi object by checking its functions and their parameters. 
+ * Validates the given ContractArtifact object by checking its functions and their parameters. * Ensures that the ABI has at least one function, a constructor, valid bytecode, and correct parameter types. * Throws an error if any inconsistency is detected during the validation process. * - * @param abi - The ContractAbi object to be validated. - * @returns A boolean value indicating whether the ABI is valid or not. + * @param artifact - The ContractArtifact object to be validated. + * @returns A boolean value indicating whether the artifact is valid or not. */ -export function abiChecker(abi: ContractAbi) { - if (!abi.functions || abi.functions.length === 0) { - throw new Error('ABI has no functions'); +export function abiChecker(artifact: ContractArtifact) { + if (!artifact.functions || artifact.functions.length === 0) { + throw new Error('artifact has no functions'); } - abi.functions.forEach(func => { + artifact.functions.forEach(func => { if (!('name' in func && typeof func.name === 'string' && func.name.length > 0)) { throw new Error('ABI function has no name'); } @@ -39,7 +39,7 @@ export function abiChecker(abi: ContractAbi) { }); // TODO: implement a better check for constructor (right now only checks if it has it or not) - if (!abi.functions.find(func => func.name === 'constructor')) { + if (!artifact.functions.find(func => func.name === 'constructor')) { throw new Error('ABI has no constructor'); } diff --git a/yarn-project/aztec.js/src/contract/contract.test.ts b/yarn-project/aztec.js/src/contract/contract.test.ts index 541a9c588b7..4852169b3a5 100644 --- a/yarn-project/aztec.js/src/contract/contract.test.ts +++ b/yarn-project/aztec.js/src/contract/contract.test.ts @@ -1,6 +1,6 @@ import { AztecAddress, CompleteAddress, EthAddress } from '@aztec/circuits.js'; import { L1ContractAddresses } from '@aztec/ethereum'; -import { ABIParameterVisibility, ContractAbi, FunctionType } from '@aztec/foundation/abi'; +import { ABIParameterVisibility, ContractArtifact, 
FunctionType } from '@aztec/foundation/abi'; import { ExtendedContractData, NodeInfo, Tx, TxExecutionRequest, TxHash, TxReceipt } from '@aztec/types'; import { MockProxy, mock } from 'jest-mock-extended'; @@ -35,7 +35,7 @@ describe('Contract Class', () => { l1ContractAddresses: l1Addresses, }; - const defaultAbi: ContractAbi = { + const defaultArtifact: ContractArtifact = { name: 'FooContract', functions: [ { @@ -92,6 +92,7 @@ describe('Contract Class', () => { bytecode: '0cd', }, ], + events: [], }; beforeEach(async () => { @@ -111,7 +112,7 @@ describe('Contract Class', () => { }); it('should create and send a contract method tx', async () => { - const fooContract = await Contract.at(contractAddress, defaultAbi, wallet); + const fooContract = await Contract.at(contractAddress, defaultArtifact, wallet); const param0 = 12; const param1 = 345n; const sentTx = fooContract.methods.bar(param0, param1).send(); @@ -126,7 +127,7 @@ describe('Contract Class', () => { }); it('should call view on an unconstrained function', async () => { - const fooContract = await Contract.at(contractAddress, defaultAbi, wallet); + const fooContract = await Contract.at(contractAddress, defaultArtifact, wallet); const result = await fooContract.methods.qux(123n).view({ from: account.address, }); @@ -136,12 +137,12 @@ describe('Contract Class', () => { }); it('should not call create on an unconstrained function', async () => { - const fooContract = await Contract.at(contractAddress, defaultAbi, wallet); + const fooContract = await Contract.at(contractAddress, defaultArtifact, wallet); await expect(fooContract.methods.qux().create()).rejects.toThrow(); }); it('should not call view on a secret or open function', async () => { - const fooContract = await Contract.at(contractAddress, defaultAbi, wallet); + const fooContract = await Contract.at(contractAddress, defaultArtifact, wallet); expect(() => fooContract.methods.bar().view()).toThrow(); expect(() => 
fooContract.methods.baz().view()).toThrow(); }); diff --git a/yarn-project/aztec.js/src/contract/contract.ts b/yarn-project/aztec.js/src/contract/contract.ts index 9249184c491..47dc389c7eb 100644 --- a/yarn-project/aztec.js/src/contract/contract.ts +++ b/yarn-project/aztec.js/src/contract/contract.ts @@ -1,4 +1,4 @@ -import { ContractAbi } from '@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { PublicKey } from '@aztec/types'; @@ -17,19 +17,19 @@ export class Contract extends ContractBase { /** * Creates a contract instance. * @param address - The deployed contract's address. - * @param abi - The Application Binary Interface for the contract. + * @param artifact - Build artifact of the contract. * @param wallet - The wallet to use when interacting with the contract. * @param portalContract - The portal contract address on L1, if any. * @returns A promise that resolves to a new Contract instance. */ - public static async at(address: AztecAddress, abi: ContractAbi, wallet: Wallet): Promise { + public static async at(address: AztecAddress, artifact: ContractArtifact, wallet: Wallet): Promise { const extendedContractData = await wallet.getExtendedContractData(address); if (extendedContractData === undefined) { throw new Error('Contract ' + address.toString() + ' is not deployed'); } return new Contract( extendedContractData.getCompleteAddress(), - abi, + artifact, wallet, extendedContractData.contractData.portalContractAddress, ); @@ -38,21 +38,21 @@ export class Contract extends ContractBase { /** * Creates a tx to deploy a new instance of a contract. * @param wallet - The wallet for executing the deployment. - * @param abi - ABI of the contract to deploy. + * @param artifact - Build artifact of the contract to deploy * @param args - Arguments for the constructor. 
*/ - public static deploy(wallet: Wallet, abi: ContractAbi, args: any[]) { - return new DeployMethod(Point.ZERO, wallet, abi, args); + public static deploy(wallet: Wallet, artifact: ContractArtifact, args: any[]) { + return new DeployMethod(Point.ZERO, wallet, artifact, args); } /** * Creates a tx to deploy a new instance of a contract using the specified public key to derive the address. * @param publicKey - Public key for deriving the address. * @param wallet - The wallet for executing the deployment. - * @param abi - ABI of the contract to deploy. + * @param artifact - Build artifact of the contract. * @param args - Arguments for the constructor. */ - public static deployWithPublicKey(publicKey: PublicKey, wallet: Wallet, abi: ContractAbi, args: any[]) { - return new DeployMethod(publicKey, wallet, abi, args); + public static deployWithPublicKey(publicKey: PublicKey, wallet: Wallet, artifact: ContractArtifact, args: any[]) { + return new DeployMethod(publicKey, wallet, artifact, args); } } diff --git a/yarn-project/aztec.js/src/contract/contract_base.ts b/yarn-project/aztec.js/src/contract/contract_base.ts index 4ac143fc47a..584529573d8 100644 --- a/yarn-project/aztec.js/src/contract/contract_base.ts +++ b/yarn-project/aztec.js/src/contract/contract_base.ts @@ -1,4 +1,4 @@ -import { ContractAbi, FunctionAbi, FunctionSelector } from '@aztec/foundation/abi'; +import { ContractArtifact, FunctionArtifact, FunctionSelector } from '@aztec/foundation/abi'; import { EthAddress } from '@aztec/foundation/eth-address'; import { CompleteAddress, DeployedContract } from '@aztec/types'; @@ -29,13 +29,13 @@ export class ContractBase implements DeployedContract { /** The deployed contract's complete address. */ public readonly completeAddress: CompleteAddress, /** The Application Binary Interface for the contract. */ - public readonly abi: ContractAbi, + public readonly artifact: ContractArtifact, /** The wallet used for interacting with this contract. 
*/ protected wallet: Wallet, /** The portal contract address on L1, if any. */ public readonly portalContract: EthAddress, ) { - abi.functions.forEach((f: FunctionAbi) => { + artifact.functions.forEach((f: FunctionArtifact) => { const interactionFunction = (...args: any[]) => { return new ContractFunctionInteraction(this.wallet, this.completeAddress.address!, f, args); }; @@ -65,6 +65,6 @@ export class ContractBase implements DeployedContract { * @returns A new contract instance. */ public withWallet(wallet: Wallet): this { - return new ContractBase(this.completeAddress, this.abi, wallet, this.portalContract) as this; + return new ContractBase(this.completeAddress, this.artifact, wallet, this.portalContract) as this; } } diff --git a/yarn-project/aztec.js/src/contract/contract_function_interaction.ts b/yarn-project/aztec.js/src/contract/contract_function_interaction.ts index e8a58728fe4..da6932b1b91 100644 --- a/yarn-project/aztec.js/src/contract/contract_function_interaction.ts +++ b/yarn-project/aztec.js/src/contract/contract_function_interaction.ts @@ -1,5 +1,5 @@ import { AztecAddress, FunctionData } from '@aztec/circuits.js'; -import { FunctionAbiHeader, FunctionType, encodeArguments } from '@aztec/foundation/abi'; +import { FunctionAbi, FunctionType, encodeArguments } from '@aztec/foundation/abi'; import { FunctionCall, TxExecutionRequest } from '@aztec/types'; import { Wallet } from '../wallet/index.js'; @@ -26,7 +26,7 @@ export class ContractFunctionInteraction extends BaseContractInteraction { constructor( protected wallet: Wallet, protected contractAddress: AztecAddress, - protected functionDao: FunctionAbiHeader, + protected functionDao: FunctionAbi, protected args: any[], ) { super(wallet); diff --git a/yarn-project/aztec.js/src/contract/index.ts b/yarn-project/aztec.js/src/contract/index.ts index 620c4d1e968..55c7329a978 100644 --- a/yarn-project/aztec.js/src/contract/index.ts +++ b/yarn-project/aztec.js/src/contract/index.ts @@ -10,12 +10,12 @@ * or 
can be queried via `view()`. * * ```ts - * const contract = await Contract.deploy(wallet, MyContractAbi, [...constructorArgs]).send().deployed(); + * const contract = await Contract.deploy(wallet, MyContractArtifact, [...constructorArgs]).send().deployed(); * console.log(`Contract deployed at ${contract.address}`); * ``` * * ```ts - * const contract = await Contract.at(address, MyContractAbi, wallet); + * const contract = await Contract.at(address, MyContractArtifact, wallet); * await contract.methods.mint(1000, owner).send().wait(); * console.log(`Total supply is now ${await contract.methods.totalSupply().view()}`); * ``` diff --git a/yarn-project/aztec.js/src/contract_deployer/contract_deployer.test.ts b/yarn-project/aztec.js/src/contract_deployer/contract_deployer.test.ts index a95e6c45b97..8622f29e6f7 100644 --- a/yarn-project/aztec.js/src/contract_deployer/contract_deployer.test.ts +++ b/yarn-project/aztec.js/src/contract_deployer/contract_deployer.test.ts @@ -1,5 +1,5 @@ import { EthAddress, Fr, Point } from '@aztec/circuits.js'; -import { ContractAbi, FunctionType } from '@aztec/foundation/abi'; +import { ContractArtifact, FunctionType } from '@aztec/foundation/abi'; import { PXE, PublicKey, Tx, TxHash, TxReceipt } from '@aztec/types'; import { MockProxy, mock } from 'jest-mock-extended'; @@ -9,7 +9,7 @@ import { ContractDeployer } from './contract_deployer.js'; describe.skip('Contract Deployer', () => { let pxe: MockProxy; - const abi: ContractAbi = { + const artifact: ContractArtifact = { name: 'MyContract', functions: [ { @@ -21,6 +21,7 @@ describe.skip('Contract Deployer', () => { bytecode: '0af', }, ], + events: [], }; const publicKey: PublicKey = Point.random(); @@ -39,7 +40,7 @@ describe.skip('Contract Deployer', () => { }); it('should create and send a contract deployment tx', async () => { - const deployer = new ContractDeployer(abi, pxe, publicKey); + const deployer = new ContractDeployer(artifact, pxe, publicKey); const sentTx = 
deployer.deploy(args[0], args[1]).send({ portalContract, contractAddressSalt, diff --git a/yarn-project/aztec.js/src/contract_deployer/contract_deployer.ts b/yarn-project/aztec.js/src/contract_deployer/contract_deployer.ts index 92ed8e23fc6..3bc720d72be 100644 --- a/yarn-project/aztec.js/src/contract_deployer/contract_deployer.ts +++ b/yarn-project/aztec.js/src/contract_deployer/contract_deployer.ts @@ -1,5 +1,5 @@ -import { Point } from '@aztec/circuits.js'; -import { ContractAbi } from '@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; +import { Point } from '@aztec/foundation/fields'; import { PXE, PublicKey } from '@aztec/types'; import { DeployMethod } from './deploy_method.js'; @@ -9,7 +9,7 @@ import { DeployMethod } from './deploy_method.js'; * @remarks Keeping this around even though we have Aztec.nr contract types because it can be useful for non-TS users. */ export class ContractDeployer { - constructor(private abi: ContractAbi, private pxe: PXE, private publicKey?: PublicKey) {} + constructor(private artifact: ContractArtifact, private pxe: PXE, private publicKey?: PublicKey) {} /** * Deploy a contract using the provided ABI and constructor arguments. @@ -21,6 +21,6 @@ export class ContractDeployer { * @returns A DeployMethod instance configured with the ABI, PXE, and constructor arguments. */ public deploy(...args: any[]) { - return new DeployMethod(this.publicKey ?? Point.ZERO, this.pxe, this.abi, args); + return new DeployMethod(this.publicKey ?? 
Point.ZERO, this.pxe, this.artifact, args); } } diff --git a/yarn-project/aztec.js/src/contract_deployer/deploy_method.ts b/yarn-project/aztec.js/src/contract_deployer/deploy_method.ts index 2cf6dbd9ad6..f6e1991e632 100644 --- a/yarn-project/aztec.js/src/contract_deployer/deploy_method.ts +++ b/yarn-project/aztec.js/src/contract_deployer/deploy_method.ts @@ -5,7 +5,7 @@ import { TxContext, getContractDeploymentInfo, } from '@aztec/circuits.js'; -import { ContractAbi, FunctionAbi, encodeArguments } from '@aztec/foundation/abi'; +import { ContractArtifact, FunctionArtifact, encodeArguments } from '@aztec/foundation/abi'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; import { PXE, PackedArguments, PublicKey, Tx, TxExecutionRequest } from '@aztec/types'; @@ -38,13 +38,18 @@ export class DeployMethod extends Bas public completeAddress?: CompleteAddress = undefined; /** Constructor function to call. */ - private constructorAbi: FunctionAbi; - - constructor(private publicKey: PublicKey, protected pxe: PXE, private abi: ContractAbi, private args: any[] = []) { + private constructorArtifact: FunctionArtifact; + + constructor( + private publicKey: PublicKey, + protected pxe: PXE, + private artifact: ContractArtifact, + private args: any[] = [], + ) { super(pxe); - const constructorAbi = abi.functions.find(f => f.name === 'constructor'); - if (!constructorAbi) throw new Error('Cannot find constructor in the ABI.'); - this.constructorAbi = constructorAbi; + const constructorArtifact = artifact.functions.find(f => f.name === 'constructor'); + if (!constructorArtifact) throw new Error('Cannot find constructor in the artifact.'); + this.constructorArtifact = constructorArtifact; } /** @@ -63,7 +68,7 @@ export class DeployMethod extends Bas const { chainId, protocolVersion } = await this.pxe.getNodeInfo(); const { completeAddress, constructorHash, functionTreeRoot } = await getContractDeploymentInfo( - this.abi, + 
this.artifact, this.args, contractAddressSalt, this.publicKey, @@ -85,8 +90,8 @@ export class DeployMethod extends Bas new Fr(chainId), new Fr(protocolVersion), ); - const args = encodeArguments(this.constructorAbi, this.args); - const functionData = FunctionData.fromAbi(this.constructorAbi); + const args = encodeArguments(this.constructorArtifact, this.args); + const functionData = FunctionData.fromAbi(this.constructorArtifact); const execution = { args, functionData, to: completeAddress.address }; const packedArguments = await PackedArguments.fromArgs(execution.args); @@ -103,7 +108,7 @@ export class DeployMethod extends Bas this.completeAddress = completeAddress; // TODO: Should we add the contracts to the DB here, or once the tx has been sent or mined? - await this.pxe.addContracts([{ abi: this.abi, completeAddress, portalContract }]); + await this.pxe.addContracts([{ artifact: this.artifact, completeAddress, portalContract }]); return this.txRequest; } @@ -118,7 +123,7 @@ export class DeployMethod extends Bas */ public send(options: DeployOptions = {}): DeploySentTx { const txHashPromise = super.send(options).getTxHash(); - return new DeploySentTx(this.abi, this.pxe, txHashPromise); + return new DeploySentTx(this.artifact, this.pxe, txHashPromise); } /** diff --git a/yarn-project/aztec.js/src/contract_deployer/deploy_sent_tx.ts b/yarn-project/aztec.js/src/contract_deployer/deploy_sent_tx.ts index 8b0fc0a2a0f..4441c831bed 100644 --- a/yarn-project/aztec.js/src/contract_deployer/deploy_sent_tx.ts +++ b/yarn-project/aztec.js/src/contract_deployer/deploy_sent_tx.ts @@ -1,5 +1,5 @@ import { FieldsOf } from '@aztec/circuits.js'; -import { ContractAbi } from '@aztec/foundation/abi'; +import { ContractArtifact } from '@aztec/foundation/abi'; import { TxHash, TxReceipt } from '@aztec/types'; import { AztecAddress, Contract, ContractBase, PXE, SentTx, WaitOpts, Wallet } from '../index.js'; @@ -20,7 +20,7 @@ export type DeployTxReceipt = FieldsO * A contract deployment 
transaction sent to the network, extending SentTx with methods to create a contract instance. */ export class DeploySentTx extends SentTx { - constructor(private abi: ContractAbi, wallet: PXE | Wallet, txHashPromise: Promise) { + constructor(private artifact: ContractArtifact, wallet: PXE | Wallet, txHashPromise: Promise) { super(wallet, txHashPromise); } @@ -50,6 +50,6 @@ export class DeploySentTx extends SentTx const contractWallet = wallet ?? (isWallet(this.pxe) && this.pxe); if (!contractWallet) throw new Error(`A wallet is required for creating a contract instance`); if (!address) throw new Error(`Contract address is missing from transaction receipt`); - return Contract.at(address, this.abi, contractWallet) as Promise; + return Contract.at(address, this.artifact, contractWallet) as Promise; } } diff --git a/yarn-project/aztec.js/src/sandbox/index.ts b/yarn-project/aztec.js/src/sandbox/index.ts index 89470f7ea96..23d0b438a0f 100644 --- a/yarn-project/aztec.js/src/sandbox/index.ts +++ b/yarn-project/aztec.js/src/sandbox/index.ts @@ -3,7 +3,7 @@ import { sleep } from '@aztec/foundation/sleep'; import zip from 'lodash.zip'; -import SchnorrAccountContractAbi from '../abis/schnorr_account_contract.json' assert { type: 'json' }; +import SchnorrAccountContractArtifact from '../artifacts/schnorr_account_contract.json' assert { type: 'json' }; import { AccountWalletWithPrivateKey, PXE, createPXEClient, getSchnorrAccount } from '../index.js'; export const INITIAL_SANDBOX_ENCRYPTION_KEYS = [ @@ -16,7 +16,7 @@ export const INITIAL_SANDBOX_SIGNING_KEYS = INITIAL_SANDBOX_ENCRYPTION_KEYS; export const INITIAL_SANDBOX_SALTS = [Fr.ZERO, Fr.ZERO, Fr.ZERO]; -export const INITIAL_SANDBOX_ACCOUNT_CONTRACT_ABI = SchnorrAccountContractAbi; +export const INITIAL_SANDBOX_ACCOUNT_CONTRACT_ABI = SchnorrAccountContractArtifact; export const { PXE_URL = 'http://localhost:8080' } = process.env; diff --git a/yarn-project/aztec.js/src/utils/authwit.ts 
b/yarn-project/aztec.js/src/utils/authwit.ts index cbad0a84c94..7f38fd271f3 100644 --- a/yarn-project/aztec.js/src/utils/authwit.ts +++ b/yarn-project/aztec.js/src/utils/authwit.ts @@ -2,6 +2,7 @@ import { AztecAddress, CircuitsWasm, GeneratorIndex } from '@aztec/circuits.js'; import { pedersenPlookupCompressWithHashIndex } from '@aztec/circuits.js/barretenberg'; import { FunctionCall, PackedArguments } from '@aztec/types'; +// docs:start:authwit_computeAuthWitMessageHash /** * Compute an authentication witness message hash from a caller and a request * H(caller: AztecAddress, target: AztecAddress, selector: Field, args_hash: Field) @@ -22,3 +23,4 @@ export const computeAuthWitMessageHash = async (caller: AztecAddress, request: F GeneratorIndex.SIGNATURE_PAYLOAD, ); }; +// docs:end:authwit_computeAuthWitMessageHash diff --git a/yarn-project/aztec.js/src/wallet/account_wallet.ts b/yarn-project/aztec.js/src/wallet/account_wallet.ts index 829f7e16bc4..9df782b76df 100644 --- a/yarn-project/aztec.js/src/wallet/account_wallet.ts +++ b/yarn-project/aztec.js/src/wallet/account_wallet.ts @@ -1,5 +1,5 @@ import { Fr, GrumpkinPrivateKey } from '@aztec/circuits.js'; -import { ABIParameterVisibility, FunctionAbiHeader, FunctionType } from '@aztec/foundation/abi'; +import { ABIParameterVisibility, FunctionAbi, FunctionType } from '@aztec/foundation/abi'; import { AuthWitness, FunctionCall, PXE, TxExecutionRequest } from '@aztec/types'; import { AccountInterface } from '../account/interface.js'; @@ -47,7 +47,7 @@ export class AccountWallet extends BaseWallet { return this.getCompleteAddress().address; } - private getSetIsValidStorageAbi(): FunctionAbiHeader { + private getSetIsValidStorageAbi(): FunctionAbi { return { name: 'set_is_valid_storage', functionType: 'open' as FunctionType, diff --git a/yarn-project/bootstrap.sh b/yarn-project/bootstrap.sh index 8e83894baaa..a5fff3dedbe 100755 --- a/yarn-project/bootstrap.sh +++ b/yarn-project/bootstrap.sh @@ -29,6 +29,7 @@ yarn --cwd 
circuits.js remake-bindings yarn --cwd circuits.js remake-constants (cd noir-contracts && ./bootstrap.sh) +(cd boxes && ./bootstrap.sh) (cd .. && l1-contracts/bootstrap.sh) # Until we push .yarn/cache, we still need to install. diff --git a/yarn-project/boxes/README.md b/yarn-project/boxes/README.md new file mode 100644 index 00000000000..ad57029c908 --- /dev/null +++ b/yarn-project/boxes/README.md @@ -0,0 +1,3 @@ +This contains the "boxes" that are meant for quickstarts for Aztec smart contract developers, containing simple Noir smart contracts and frontends. + +If CI is failing, it may be due to incompatibility with previous build artifacts - running "./bootstrap.sh" inside this boxes folder should regenerate the artifacts. diff --git a/yarn-project/boxes/blank-react/README.md b/yarn-project/boxes/blank-react/README.md index f6d7be7983f..3a1035d8a79 100644 --- a/yarn-project/boxes/blank-react/README.md +++ b/yarn-project/boxes/blank-react/README.md @@ -62,12 +62,12 @@ This will generate a [Contract ABI](src/artifacts/test_contract.json) and TypeSc Note: the `compile` command seems to generate a Typescript file which needs a single change - ``` -import TestContractAbiJson from 'text_contract.json' assert { type: 'json' }; +import TestContractArtifactJson from 'text_contract.json' assert { type: 'json' }; // need to update the relative import to -import TestContractAbiJson from './test_contract.json' assert { type: 'json' }; +import TestContractArtifactJson from './test_contract.json' assert { type: 'json' }; ``` -After compiling, you can re-deploy the upated noir smart contract from the web UI. The function interaction forms are generated from parsing the ContractABI, so they should update automatically after you recompile. +After compiling, you can re-deploy the updated noir smart contract from the web UI. The function interaction forms are generated from parsing the contract artifacts, so they should update automatically after you recompile. 
## Learn More diff --git a/yarn-project/boxes/blank-react/package.json b/yarn-project/boxes/blank-react/package.json index 60abb15a1c7..5948e40c4e1 100644 --- a/yarn-project/boxes/blank-react/package.json +++ b/yarn-project/boxes/blank-react/package.json @@ -37,7 +37,6 @@ "@aztec/aztec-ui": "^0.1.14", "@aztec/aztec.js": "workspace:^", "@aztec/circuits.js": "workspace:^", - "@aztec/cli": "workspace:^", "@aztec/foundation": "workspace:^", "@aztec/types": "workspace:^", "classnames": "^2.3.2", diff --git a/yarn-project/boxes/blank-react/src/app/components/contract_function_form.tsx b/yarn-project/boxes/blank-react/src/app/components/contract_function_form.tsx index 6c4e6b1d5ff..484a7b10207 100644 --- a/yarn-project/boxes/blank-react/src/app/components/contract_function_form.tsx +++ b/yarn-project/boxes/blank-react/src/app/components/contract_function_form.tsx @@ -1,12 +1,12 @@ -import { Button, Loader } from '@aztec/aztec-ui'; -import { AztecAddress, CompleteAddress, Fr } from '@aztec/aztec.js'; -import { ContractAbi, FunctionAbi } from '@aztec/foundation/abi'; -import { useFormik } from 'formik'; -import * as Yup from 'yup'; import { CONTRACT_ADDRESS_PARAM_NAMES, pxe } from '../../config.js'; import { callContractFunction, deployContract, viewContractFunction } from '../../scripts/index.js'; import { convertArgs } from '../../scripts/util.js'; import styles from './contract_function_form.module.scss'; +import { Button, Loader } from '@aztec/aztec-ui'; +import { AztecAddress, CompleteAddress, Fr } from '@aztec/aztec.js'; +import { ContractArtifact, FunctionArtifact } from '@aztec/foundation/abi'; +import { useFormik } from 'formik'; +import * as Yup from 'yup'; type NoirFunctionYupSchema = { // hack: add `any` at the end to get the array schema to typecheck @@ -18,7 +18,7 @@ type NoirFunctionFormValues = { [key: string]: string | number | number[] | boolean; }; -function generateYupSchema(functionAbi: FunctionAbi, defaultAddress: string) { +function 
generateYupSchema(functionAbi: FunctionArtifact, defaultAddress: string) { const parameterSchema: NoirFunctionYupSchema = {}; const initialValues: NoirFunctionFormValues = {}; for (const param of functionAbi.parameters) { @@ -62,12 +62,12 @@ function generateYupSchema(functionAbi: FunctionAbi, defaultAddress: string) { async function handleFunctionCall( contractAddress: AztecAddress | undefined, - contractAbi: ContractAbi, + contractArtifact: ContractArtifact, functionName: string, args: any, wallet: CompleteAddress, ) { - const functionAbi = contractAbi.functions.find(f => f.name === functionName)!; + const functionAbi = contractArtifact.functions.find(f => f.name === functionName)!; const typedArgs: any[] = convertArgs(functionAbi, args); if (functionName === 'constructor' && !!wallet) { @@ -80,13 +80,20 @@ async function handleFunctionCall( // for now, dont let user change the salt. requires some change to the form generation if we want to let user choose one // since everything is currently based on parsing the contractABI, and the salt parameter is not present there const salt = Fr.random(); - return await deployContract(wallet, contractAbi, typedArgs, salt, pxe); + return await deployContract(wallet, contractArtifact, typedArgs, salt, pxe); } if (functionAbi.functionType === 'unconstrained') { - return await viewContractFunction(contractAddress!, contractAbi, functionName, typedArgs, pxe, wallet); + return await viewContractFunction(contractAddress!, contractArtifact, functionName, typedArgs, pxe, wallet); } else { - const txnReceipt = await callContractFunction(contractAddress!, contractAbi, functionName, typedArgs, pxe, wallet); + const txnReceipt = await callContractFunction( + contractAddress!, + contractArtifact, + functionName, + typedArgs, + pxe, + wallet, + ); return `Transaction ${txnReceipt.status} on block number ${txnReceipt.blockNumber}`; } } @@ -94,8 +101,8 @@ async function handleFunctionCall( interface ContractFunctionFormProps { wallet: 
CompleteAddress; contractAddress?: AztecAddress; - contractAbi: ContractAbi; - functionAbi: FunctionAbi; + contractArtifact: ContractArtifact; + functionArtifact: FunctionArtifact; defaultAddress: string; title?: string; buttonText?: string; @@ -109,8 +116,8 @@ interface ContractFunctionFormProps { export function ContractFunctionForm({ wallet, contractAddress, - contractAbi, - functionAbi, + contractArtifact, + functionArtifact, defaultAddress, buttonText = 'Submit', isLoading, @@ -119,14 +126,20 @@ export function ContractFunctionForm({ onSuccess, onError, }: ContractFunctionFormProps) { - const { validationSchema, initialValues } = generateYupSchema(functionAbi, defaultAddress); + const { validationSchema, initialValues } = generateYupSchema(functionArtifact, defaultAddress); const formik = useFormik({ initialValues: initialValues, validationSchema: validationSchema, onSubmit: async (values: any) => { onSubmit(); try { - const result = await handleFunctionCall(contractAddress, contractAbi, functionAbi.name, values, wallet); + const result = await handleFunctionCall( + contractAddress, + contractArtifact, + functionArtifact.name, + values, + wallet, + ); onSuccess(result); } catch (e: any) { onError(e.message); @@ -136,7 +149,7 @@ export function ContractFunctionForm({ return (
- {functionAbi.parameters.map(input => ( + {functionArtifact.parameters.map(input => (