diff --git a/.circleci/config.yml b/.circleci/config.yml index 9d56b6b719f..279e66e9d5b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -443,6 +443,28 @@ jobs: echo "export DOCKER_BUILDKIT=" > $BASH_ENV build aztec-sandbox false arm64 + aztec-p2p-bootstrap: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: large + steps: + - *checkout + - *setup_env + - run: + name: "Build and test" + command: build p2p-bootstrap | add_timestamps + + aztec-node: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: large + steps: + - *checkout + - *setup_env + - run: + name: "Build and test" + command: build aztec-node | add_timestamps + pxe-x86_64: machine: image: ubuntu-2204:2023.07.2 @@ -1024,6 +1046,17 @@ jobs: name: "yarn-project" command: yarn-project/deploy_npm.sh + deploy-ecr: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: medium + steps: + - *checkout + - *setup_env + - run: + name: "yarn-project" + command: yarn-project/deploy_ecr.sh + deploy-dockerhub: machine: image: ubuntu-2204:2023.07.2 @@ -1244,6 +1277,16 @@ workflows: - yarn-project <<: *defaults + - aztec-p2p-bootstrap: + requires: + - yarn-project + <<: *defaults + + - aztec-node: + requires: + - yarn-project + <<: *defaults + - pxe-x86_64: requires: - yarn-project @@ -1374,6 +1417,10 @@ workflows: requires: - e2e-end <<: *deploy_defaults + - deploy-ecr: + requires: + - e2e-end + <<: *deploy_defaults - deploy-end: requires: diff --git a/.gitignore b/.gitignore index 9115ec7782a..46c867ea481 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,5 @@ node_modules build/ .idea cmake-build-debug -.terraform +.terraform* .bootstrapped diff --git a/build_manifest.yml b/build_manifest.yml index 54b6c81fcc9..e27a7fbd00b 100644 --- a/build_manifest.yml +++ b/build_manifest.yml @@ -156,6 +156,18 @@ end-to-end: dependencies: - yarn-project +aztec-node: + buildDir: yarn-project + projectDir: yarn-project/aztec-node + dependencies: + - yarn-project + +p2p-bootstrap: + buildDir: yarn-project + projectDir: yarn-project/p2p-bootstrap + dependencies: + - yarn-project + docs: buildDir: . dockerfile: docs/Dockerfile diff --git a/iac/main.tf b/iac/main.tf new file mode 100644 index 00000000000..6c088f4042d --- /dev/null +++ b/iac/main.tf @@ -0,0 +1,61 @@ +terraform { + backend "s3" { + bucket = "aztec-terraform" + key = "aztec-network/iac" + region = "eu-west-2" + } + required_providers { + aws = { + source = "hashicorp/aws" + version = "3.74.2" + } + } +} + +data "terraform_remote_state" "setup_iac" { + backend = "s3" + config = { + bucket = "aztec-terraform" + key = "setup/setup-iac" + region = "eu-west-2" + } +} + +provider "aws" { + profile = "default" + region = "eu-west-2" +} + +# Create our load balancer. 
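+# It is a network (layer 4) load balancer because the p2p traffic it fronts is raw TCP.
+# Its ARN and DNS name are exported in output.tf so that downstream stacks (the aztec-node and
+# p2p-bootstrap Terraform both declare an aztec-network/iac remote state) can reference it,
+# e.g. to attach their own TCP listeners and target groups. A rough, illustrative sketch of such
+# a consumer is shown below; the resource names and port 40400 are placeholders, not part of
+# this stack:
+#
+#   resource "aws_lb_target_group" "example-p2p" {
+#     name     = "example-p2p"
+#     port     = 40400
+#     protocol = "TCP"
+#     vpc_id   = data.terraform_remote_state.setup_iac.outputs.vpc_id
+#   }
+#
+#   resource "aws_lb_listener" "example-p2p" {
+#     load_balancer_arn = data.terraform_remote_state.aztec-network_iac.outputs.nlb_arn
+#     port              = 40400
+#     protocol          = "TCP"
+#
+#     default_action {
+#       type             = "forward"
+#       target_group_arn = aws_lb_target_group.example-p2p.arn
+#     }
+#   }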
+resource "aws_lb" "aztec-network" { + name = "aztec-network" + internal = false + load_balancer_type = "network" + security_groups = [ + data.terraform_remote_state.setup_iac.outputs.security_group_public_id, aws_security_group.security-group-p2p.id + ] + subnets = [ + data.terraform_remote_state.setup_iac.outputs.subnet_az1_id, + data.terraform_remote_state.setup_iac.outputs.subnet_az2_id + ] + + access_logs { + bucket = "aztec-logs" + prefix = "aztec-network-nlb-logs" + enabled = false + } + + tags = { + Name = "aztec-network" + } +} + +resource "aws_security_group" "security-group-p2p" { + name = "security-group-p2p" + description = "Allow inbound p2p traffic" + vpc_id = data.terraform_remote_state.setup_iac.outputs.vpc_id + + tags = { + Name = "allow-p2p" + } +} diff --git a/iac/output.tf b/iac/output.tf new file mode 100644 index 00000000000..1c99c747e1b --- /dev/null +++ b/iac/output.tf @@ -0,0 +1,11 @@ +output "nlb_arn" { + value = "${aws_lb.aztec-network.arn}" +} + +output "nlb_dns" { + value = "${aws_lb.aztec-network.dns_name}" +} + +output "p2p_security_group_id" { + value = "${aws_security_group.security-group-p2p.id}" +} diff --git a/yarn-project/aztec-node/Dockerfile b/yarn-project/aztec-node/Dockerfile new file mode 100644 index 00000000000..d01aa4ede31 --- /dev/null +++ b/yarn-project/aztec-node/Dockerfile @@ -0,0 +1,14 @@ +FROM 278380418400.dkr.ecr.eu-west-2.amazonaws.com/yarn-project AS builder + +WORKDIR /usr/src/yarn-project/aztec-node + +# Productionify. See comment in yarn-project-base/Dockerfile. +RUN yarn cache clean && yarn workspaces focus --production + +# Create final, minimal size image. +FROM node:18-alpine +COPY --from=builder /usr/src/ /usr/src/ +WORKDIR /usr/src/yarn-project/aztec-node +ENTRYPOINT ["yarn"] +CMD [ "start" ] +EXPOSE 8080 diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 9b7c610fda7..d989a584e7c 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -69,7 +69,16 @@ export class AztecNodeService implements AztecNode { protected readonly globalVariableBuilder: GlobalVariableBuilder, protected readonly merkleTreesDb: levelup.LevelUp, private log = createDebugLogger('aztec:node'), - ) {} + ) { + const message = + `Started Aztec Node with contracts - \n` + + `Rollup: ${config.l1Contracts.rollupAddress.toString()}\n` + + `Registry: ${config.l1Contracts.registryAddress.toString()}\n` + + `Inbox: ${config.l1Contracts.inboxAddress.toString()}\n` + + `Outbox: ${config.l1Contracts.outboxAddress.toString()}\n` + + `Contract Emitter: ${config.l1Contracts.contractDeploymentEmitterAddress.toString()}`; + this.log(message); + } /** * initializes the Aztec Node, wait for component to sync. diff --git a/yarn-project/aztec-node/terraform/main.tf b/yarn-project/aztec-node/terraform/main.tf index 38fff0a2d9e..7f64a6d9f1d 100644 --- a/yarn-project/aztec-node/terraform/main.tf +++ b/yarn-project/aztec-node/terraform/main.tf @@ -1,3 +1,9 @@ +# Terraform to setup a prototype network of Aztec Nodes in AWS +# It sets up 2 full nodes with different ports/keys etc. 
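+# Each node gets its own CloudWatch log group, service discovery entry, ECS task definition and
+# ECS service, suffixed with the node number (e.g. aztec-node-1) to keep them apart.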
+# Some duplication across the 2 defined services, could possibly +# be refactored to use modules as and when we build out infrastructure for real + + terraform { backend "s3" { bucket = "aztec-terraform" @@ -34,14 +40,23 @@ data "terraform_remote_state" "aztec2_iac" { } } +data "terraform_remote_state" "aztec-network_iac" { + backend = "s3" + config = { + bucket = "aztec-terraform" + key = "aztec-network/iac" + region = "eu-west-2" + } +} + -resource "aws_cloudwatch_log_group" "aztec_node_log_group" { - name = "/fargate/service/${var.DEPLOY_TAG}/aztec-node" +resource "aws_cloudwatch_log_group" "aztec-node-log-group-1" { + name = "/fargate/service/${var.DEPLOY_TAG}/aztec-node-1" retention_in_days = 14 } -resource "aws_service_discovery_service" "aztec-node" { - name = "${var.DEPLOY_TAG}-aztec-node" +resource "aws_service_discovery_service" "aztec-node-1" { + name = "${var.DEPLOY_TAG}-aztec-node-1" health_check_custom_config { failure_threshold = 1 @@ -72,7 +87,7 @@ resource "aws_service_discovery_service" "aztec-node" { # Define task definition and service. resource "aws_ecs_task_definition" "aztec-node-1" { - family = "${var.DEPLOY_TAG}-aztec-node" + family = "${var.DEPLOY_TAG}-aztec-node-1" requires_compatibilities = ["FARGATE"] network_mode = "awsvpc" cpu = "2048" @@ -83,13 +98,16 @@ resource "aws_ecs_task_definition" "aztec-node-1" { container_definitions = < { - const client = await createCompatibleClient(options.rpcUrl, debugLogger); - const privateKey = options.privateKey ?? GrumpkinScalar.random(); + // `options.wait` is default true. Passing `--no-wait` will set it to false. + // https://github.com/tj/commander.js#other-option-types-negatable-boolean-and-booleanvalue + .option('--no-wait', 'Skip waiting for the contract to be deployed. Print the hash of deployment transaction') + .action(async ({ rpcUrl, privateKey, wait }) => { + const client = await createCompatibleClient(rpcUrl, debugLogger); + const actualPrivateKey = privateKey ?? GrumpkinScalar.random(); - const account = getSchnorrAccount(client, privateKey, privateKey, accountCreationSalt); - const wallet = await account.waitDeploy(); - const { address, publicKey, partialAddress } = wallet.getCompleteAddress(); + const account = getSchnorrAccount(client, actualPrivateKey, actualPrivateKey, accountCreationSalt); + const { address, publicKey, partialAddress } = await account.getCompleteAddress(); + const tx = await account.deploy(); + const txHash = await tx.getTxHash(); + debugLogger(`Account contract tx sent with hash ${txHash}`); + if (wait) { + log(`\nWaiting for account contract deployment...`); + await tx.wait(); + } else { + log(`\nAccount deployment transaction hash: ${txHash}\n`); + } - log(`\nCreated new account:\n`); + log(`\nNew account:\n`); log(`Address: ${address.toString()}`); log(`Public key: ${publicKey.toString()}`); - if (!options.privateKey) log(`Private key: ${privateKey.toString(true)}`); + if (!privateKey) log(`Private key: ${actualPrivateKey.toString(true)}`); log(`Partial address: ${partialAddress.toString()}`); }); diff --git a/yarn-project/cli/src/utils.ts b/yarn-project/cli/src/utils.ts index 1fc57b19ecd..0937bed88eb 100644 --- a/yarn-project/cli/src/utils.ts +++ b/yarn-project/cli/src/utils.ts @@ -150,7 +150,6 @@ export async function getTxSender(pxe: PXE, _from?: string) { /** * Performs necessary checks, conversions & operations to call a contract fn from the CLI. * @param contractFile - Directory of the compiled contract ABI. - * @param contractAddress - Aztec Address of the contract. 
* @param functionName - Name of the function to be called. * @param _functionArgs - Arguments to call the function with. * @param log - Logger instance that will output to the CLI diff --git a/yarn-project/deploy_ecr.sh b/yarn-project/deploy_ecr.sh new file mode 100755 index 00000000000..9dae7f7b1d6 --- /dev/null +++ b/yarn-project/deploy_ecr.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +extract_repo yarn-project /usr/src project +PROJECT_ROOT=$(pwd)/project/src/ + +for REPOSITORY in "p2p-bootstrap" "aztec-node" +do + echo "Deploying $REPOSITORY" + RELATIVE_PROJECT_DIR=$(query_manifest relativeProjectDir $REPOSITORY) + cd "$PROJECT_ROOT/$RELATIVE_PROJECT_DIR" + + deploy_ecr $REPOSITORY +done \ No newline at end of file diff --git a/yarn-project/end-to-end/scripts/docker-compose-p2p.yml b/yarn-project/end-to-end/scripts/docker-compose-p2p.yml index c6d607a79bb..9fa069b1ca9 100644 --- a/yarn-project/end-to-end/scripts/docker-compose-p2p.yml +++ b/yarn-project/end-to-end/scripts/docker-compose-p2p.yml @@ -33,7 +33,6 @@ services: SEQ_MIN_TX_PER_BLOCK: 1 P2P_TCP_LISTEN_IP: '0.0.0.0' P2P_NAT_ENABLED: 'false' - P2P_SERVER: 'false' P2P_ENABLED: 'true' BOOTSTRAP_NODES: '/ip4/p2p-bootstrap/tcp/40400/p2p/12D3KooWGBpbC6qQFkaCYphjNeY6sV99o4SnEWyTeBigoVriDn4D' command: ${TEST:-./src/e2e_p2p_network.test.ts} diff --git a/yarn-project/end-to-end/scripts/start_p2p_e2e.sh b/yarn-project/end-to-end/scripts/start_p2p_e2e.sh index 9b1b661a7fe..828a1db8b43 100755 --- a/yarn-project/end-to-end/scripts/start_p2p_e2e.sh +++ b/yarn-project/end-to-end/scripts/start_p2p_e2e.sh @@ -10,7 +10,6 @@ export SEQ_MIN_TX_PER_BLOCK=32 export BOOTSTRAP_NODES='/ip4/127.0.0.1/tcp/40400/p2p/12D3KooWGBpbC6qQFkaCYphjNeY6sV99o4SnEWyTeBigoVriDn4D' export P2P_TCP_LISTEN_IP='0.0.0.0' export P2P_NAT_ENABLED='false' -export P2P_SERVER='false' export P2P_ENABLED='true' export DEBUG='aztec:*,libp2p:*' diff --git a/yarn-project/end-to-end/src/e2e_p2p_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p_network.test.ts index b4a5d8ba3f6..507738a1fb5 100644 --- a/yarn-project/end-to-end/src/e2e_p2p_network.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p_network.test.ts @@ -78,10 +78,10 @@ describe('e2e_p2p_network', () => { p2pEnabled: true, tcpListenPort: BOOT_NODE_TCP_PORT, tcpListenIp: '0.0.0.0', - announceHostname: '127.0.0.1', + announceHostname: '/tcp/127.0.0.1', announcePort: BOOT_NODE_TCP_PORT, peerIdPrivateKey: Buffer.from(peerId.privateKey!).toString('hex'), - serverMode: false, + clientKADRouting: false, minPeerCount: 10, maxPeerCount: 100, @@ -107,7 +107,7 @@ describe('e2e_p2p_network', () => { minTxsPerBlock: NUM_TXS_PER_BLOCK, maxTxsPerBlock: NUM_TXS_PER_BLOCK, p2pEnabled: true, - serverMode: false, + clientKADRouting: false, }; return await AztecNodeService.createAndSync(newConfig); }; diff --git a/yarn-project/p2p-bootstrap/Dockerfile b/yarn-project/p2p-bootstrap/Dockerfile new file mode 100644 index 00000000000..98cb0e4d6cf --- /dev/null +++ b/yarn-project/p2p-bootstrap/Dockerfile @@ -0,0 +1,14 @@ +FROM 278380418400.dkr.ecr.eu-west-2.amazonaws.com/yarn-project AS builder + +WORKDIR /usr/src/yarn-project/p2p-bootstrap + +# Productionify. See comment in yarn-project-base/Dockerfile. +RUN yarn cache clean && yarn workspaces focus --production + +# Create final, minimal size image. 
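+# Only the source tree from the builder stage (with its production-only node_modules from the
+# `yarn workspaces focus --production` step above) is copied across; the builder stage itself
+# is not part of the final image.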
+FROM node:18-alpine +COPY --from=builder /usr/src/ /usr/src/ +WORKDIR /usr/src/yarn-project/p2p-bootstrap +ENTRYPOINT ["yarn"] +CMD [ "start" ] +EXPOSE 8080 diff --git a/yarn-project/p2p-bootstrap/src/index.ts b/yarn-project/p2p-bootstrap/src/index.ts index 3b1ccc3080d..f6de48946fe 100644 --- a/yarn-project/p2p-bootstrap/src/index.ts +++ b/yarn-project/p2p-bootstrap/src/index.ts @@ -1,8 +1,6 @@ import { createDebugLogger } from '@aztec/foundation/log'; import { BootstrapNode, getP2PConfigEnvVars } from '@aztec/p2p'; -import 'dotenv/config'; - const logger = createDebugLogger('aztec:bootstrap_node'); /** diff --git a/yarn-project/p2p-bootstrap/terraform/main.tf b/yarn-project/p2p-bootstrap/terraform/main.tf new file mode 100644 index 00000000000..cd3e4d591c1 --- /dev/null +++ b/yarn-project/p2p-bootstrap/terraform/main.tf @@ -0,0 +1,402 @@ +# Terraform to setup a prototype network of Aztec Boot Nodes in AWS +# It sets up 2 boot nodes with different ports/keys etc. +# Some duplication across the 2 defined services, could possibly +# be refactored to use modules as and when we build out infrastructure for real + +terraform { + backend "s3" { + bucket = "aztec-terraform" + region = "eu-west-2" + } + required_providers { + aws = { + source = "hashicorp/aws" + version = "3.74.2" + } + } +} + +# Define provider and region +provider "aws" { + region = "eu-west-2" +} + +data "terraform_remote_state" "setup_iac" { + backend = "s3" + config = { + bucket = "aztec-terraform" + key = "setup/setup-iac" + region = "eu-west-2" + } +} + +data "terraform_remote_state" "aztec2_iac" { + backend = "s3" + config = { + bucket = "aztec-terraform" + key = "aztec2/iac" + region = "eu-west-2" + } +} + +data "terraform_remote_state" "aztec-network_iac" { + backend = "s3" + config = { + bucket = "aztec-terraform" + key = "aztec-network/iac" + region = "eu-west-2" + } +} + + +resource "aws_cloudwatch_log_group" "aztec-bootstrap-1-log-group" { + name = "/fargate/service/${var.DEPLOY_TAG}/aztec-bootstrap-1" + retention_in_days = 14 +} + +resource "aws_service_discovery_service" "aztec-bootstrap-1" { + name = "${var.DEPLOY_TAG}-aztec-bootstrap-1" + + health_check_custom_config { + failure_threshold = 1 + } + + dns_config { + namespace_id = data.terraform_remote_state.setup_iac.outputs.local_service_discovery_id + + dns_records { + ttl = 60 + type = "A" + } + + dns_records { + ttl = 60 + type = "SRV" + } + + routing_policy = "MULTIVALUE" + } + + # Terraform just fails if this resource changes and you have registered instances. 
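+  # The destroy-time provisioner below works around that: it runs servicediscovery-drain.sh to
+  # deregister any instances before Terraform attempts to destroy the service.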
+ provisioner "local-exec" { + when = destroy + command = "${path.module}/../servicediscovery-drain.sh ${self.id}" + } +} + +resource "aws_ecs_task_definition" "aztec-bootstrap-1" { + family = "${var.DEPLOY_TAG}-aztec-bootstrap-1" + requires_compatibilities = ["FARGATE"] + network_mode = "awsvpc" + cpu = "2048" + memory = "4096" + execution_role_arn = data.terraform_remote_state.setup_iac.outputs.ecs_task_execution_role_arn + task_role_arn = data.terraform_remote_state.aztec2_iac.outputs.cloudwatch_logging_ecs_role_arn + + container_definitions = < { @@ -141,12 +140,11 @@ export class LibP2PService implements P2PService { */ public static async new(config: P2PConfig, txPool: TxPool) { const { - enableNat, tcpListenIp, tcpListenPort, announceHostname, announcePort, - serverMode, + clientKADRouting, minPeerCount, maxPeerCount, peerIdPrivateKey, @@ -158,7 +156,7 @@ export class LibP2PService implements P2PService { peerId, addresses: { listen: [`/ip4/${tcpListenIp}/tcp/${tcpListenPort}`], - announce: announceHostname ? [`/ip4/${announceHostname}/tcp/${announcePort ?? tcpListenPort}`] : [], + announce: announceHostname ? [`${announceHostname}/tcp/${announcePort ?? tcpListenPort}`] : [], }, transports: [tcp()], streamMuxers: [yamux(), mplex()], @@ -180,15 +178,23 @@ export class LibP2PService implements P2PService { }), kadDHT: kadDHT({ protocolPrefix: 'aztec', - clientMode: !serverMode, + clientMode: clientKADRouting, }), }; - if (enableNat) { - services.nat = autoNATService({ - protocolPrefix: 'aztec', - }); - } + // The autonat service seems quite problematic in that using it seems to cause a lot of attempts + // to dial ephemeral ports. I suspect that it works better if you can get the uPNPnat service to + // work as then you would have a permanent port to be dialled. + // Alas, I struggled to get this to work reliably either. I find there is a race between the + // service that reads our listener addresses and the uPnP service. + // The result being the uPnP service can't find an address to use for the port forward. + // Need to investigate further. + // if (enableNat) { + // services.autoNAT = autoNATService({ + // protocolPrefix: 'aztec', + // }); + // services.uPnPNAT = uPnPNATService(); + // } const node = await createLibp2p({ ...opts,