Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(ci): redeploy triggers #3677

Merged
merged 25 commits into from
Dec 14, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
abc06c7
WIP fixing devnet redeploy triggers
spypsy Dec 13, 2023
50b10fb
merge with master
spypsy Dec 13, 2023
b8b50b1
add dry_deploy capabilities to new script
spypsy Dec 13, 2023
59ee799
redeploy l1-contracts
spypsy Dec 13, 2023
8af11fe
use dry deploy on should_deploy [ci dry-deploy]
spypsy Dec 13, 2023
8e53628
fixes [ci dry-deploy]
spypsy Dec 13, 2023
36fae97
fix script file permissions
spypsy Dec 13, 2023
2560ef3
[ci dry-deploy]
spypsy Dec 13, 2023
6fcb3e5
export DRY_DEPLOY for use in setup_env [ci dry-deploy]
spypsy Dec 13, 2023
f7a8a36
no -auto-approve for tf plan
spypsy Dec 13, 2023
c2f62f1
[ci dry-deploy]
spypsy Dec 13, 2023
c53c46e
aztec-dev deploy tag on dry runs [ci dry-deploy]
spypsy Dec 13, 2023
562a7f8
fix typo [ci dry-deploy]
spypsy Dec 13, 2023
3f5f529
set DEPLOY_ENV to dev [ci dry-deploy]
spypsy Dec 13, 2023
c213ab3
fix deploy_tf_services args
spypsy Dec 13, 2023
30a1a3f
add node/bootnode to build_manifest [ci dry-deploy]
spypsy Dec 13, 2023
4928c54
script improvements [ci dry-deploy]
spypsy Dec 13, 2023
63f3c58
extract PROJECT_NAME [ci dry-deploy]
spypsy Dec 13, 2023
220c778
fix grep regex pattern [ci dry-deploy]
spypsy Dec 13, 2023
e100ede
Merge branch 'master' into spy/fix-redeploys
spypsy Dec 13, 2023
3321ef5
[ci dry-deploy]
spypsy Dec 13, 2023
f8166f4
fix ecs naming + don't fail service script [ci dry-deploy]
spypsy Dec 13, 2023
d6c8f12
update TF_VARs
spypsy Dec 13, 2023
3227d58
[ci dry-deploy]
spypsy Dec 13, 2023
ae135b3
add comments to new script
spypsy Dec 14, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 24 additions & 8 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -945,27 +945,43 @@ jobs:
name: "Deploy mainnet fork"
command: |
should_deploy || exit 0
deploy mainnet-fork
deploy_terraform_services iac/mainnet-fork
- run:
name: "Deploy L1 contracts to mainnet fork"
working_directory: l1-contracts
command: |
should_deploy || exit 0
./scripts/ci_deploy_contracts.sh
- run:
name: "Deploy devnet to AWS"
name: "Deploy P2P bootstrap servers to AWS"
command: |
should_deploy 0 || exit 0
export TF_VAR_FAUCET_PRIVATE_KEY=$FAUCET_PRIVATE_KEY
export TF_VAR_BOOTNODE_1_PEER_ID=$BOOTNODE_1_PEER_ID
export TF_VAR_BOOTNODE_2_PEER_ID=$BOOTNODE_2_PEER_ID
# Export variables for Terraform.
export TF_VAR_BOOTNODE_1_PRIVATE_KEY=$BOOTNODE_1_PRIVATE_KEY
export TF_VAR_BOOTNODE_2_PRIVATE_KEY=$BOOTNODE_2_PRIVATE_KEY
deploy_terraform_services yarn-project/p2p-bootstrap aztec-sandbox
- run:
name: "Deploy Aztec Nodes to AWS"
command: |
should_deploy 0 || exit 0
export TF_VAR_BOOTNODE_1_PEER_ID=$BOOTNODE_1_PEER_ID
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I forget but can't we specify all of the TF_VAR variables in CCI?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hmm so yes, these are CCI Env vars that get 'renamed' here for terraform. We could store these directly as TF_VAR_$VAR in CCI but not all of them as some are used in different scripts as well.
I think for consistency it's better to have the regular names stored and convert what's needed for terraform

export TF_VAR_BOOTNODE_2_PEER_ID=$BOOTNODE_2_PEER_ID
export TF_VAR_SEQ_1_PUBLISHER_PRIVATE_KEY=$SEQ_1_PUBLISHER_PRIVATE_KEY
export TF_VAR_SEQ_2_PUBLISHER_PRIVATE_KEY=$SEQ_2_PUBLISHER_PRIVATE_KEY
deploy_terraform p2p-bootstrap yarn-project/p2p-bootstrap/terraform
deploy_terraform aztec-node yarn-project/aztec-node/terraform
deploy_terraform aztec-faucet yarn-project/aztec-faucet/terraform
export TF_VAR_NODE_1_PRIVATE_KEY=$NODE_1_PRIVATE_KEY
export TF_VAR_NODE_2_PRIVATE_KEY=$NODE_2_PRIVATE_KEY
# Check if l1-contracts have changed (CONTRACTS_DEPLOYED is exported into
# $BASH_ENV by l1-contracts/scripts/ci_deploy_contracts.sh in an earlier step).
# Must use a test command: a bare `if $CONTRACTS_DEPLOYED -eq 1` would try to
# execute the variable's value ("1"/"0") as a command with `-eq 1` as its args.
# Default to 0 so the step also works if the contracts deploy step was skipped.
if [ "${CONTRACTS_DEPLOYED:-0}" -eq 1 ]; then
  # Contracts changed: taint the node data store so node state is rebuilt
  # against the freshly deployed contracts (4th arg = terraform taint target).
  deploy_terraform_services yarn-project/aztec-node aztec-sandbox aztec-node aws_efs_file_system.node_data_store
else
  deploy_terraform_services yarn-project/aztec-node aztec-sandbox
fi
- run:
name: "Deploy Aztec Faucet to AWS"
command: |
should_deploy 0 || exit 0
export TF_VAR_FAUCET_PRIVATE_KEY=$FAUCET_PRIVATE_KEY
deploy_terraform_services yarn-project/aztec-faucet aztec-sandbox

# Repeatable config for defining the workflow below.
defaults: &defaults
Expand Down
24 changes: 20 additions & 4 deletions build-system/scripts/deploy_service
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,24 @@
[ -n "${BUILD_SYSTEM_DEBUG:-}" ] && set -x # conditionally trace
set -eu

# Redeploy service with latest image.
# Redeploy services with the latest image that match $DEPLOY_TAG followed by $SERVICE_NAME.
SERVICE_NAME=$1
if aws ecs list-services --region $ECR_DEPLOY_REGION --cluster setup | grep "/$SERVICE_NAME\"" > /dev/null; then
aws ecs update-service --region $ECR_DEPLOY_REGION --cluster setup --service $SERVICE_NAME --force-new-deployment
fi
PATTERN="$DEPLOY_TAG.*$SERVICE_NAME.*"

# Fetch list of services
SERVICES=$(aws ecs list-services --region $ECR_DEPLOY_REGION --cluster setup | grep -Eo "arn:aws:ecs:[^:]+:[^:]+:service/[^/]+/$PATTERN" || true)

echo "Services to redeploy:"
echo "$SERVICES"

# Loop through and update each matching service.
for SERVICE_ARN in $SERVICES; do
# Extract the actual service name from ARN
ACTUAL_SERVICE_NAME=$(echo "$SERVICE_ARN" | awk -F/ '{print $NF}')

if [ "$DRY_DEPLOY" -eq 1 ]; then
echo "DRY_DEPLOY: aws ecs update-service --region $ECR_DEPLOY_REGION --cluster setup --service $ACTUAL_SERVICE_NAME --force-new-deployment"
else
aws ecs update-service --region $ECR_DEPLOY_REGION --cluster setup --service $ACTUAL_SERVICE_NAME --force-new-deployment
fi
done
2 changes: 1 addition & 1 deletion build-system/scripts/deploy_terraform
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ for RESOURCE in $TO_TAINT; do
done

if [ "$DRY_DEPLOY" -eq 1 ]; then
terraform plan -input=false -auto-approve
terraform plan -input=false
else
terraform apply -input=false -auto-approve
fi
44 changes: 44 additions & 0 deletions build-system/scripts/deploy_terraform_services
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
#!/usr/bin/env bash
[ -n "${BUILD_SYSTEM_DEBUG:-}" ] && set -x # conditionally trace
set -eu

# Deploys a terraform project and restarts the ECS services it backs.
# Usage: deploy_terraform_services <project_dir> [check_repo] [service_names] [to_taint]

# The terraform project directory.
PROJECT_DIR=$1
# Extract project name from the directory, e.g. yarn-project/aztec-node -> aztec-node.
PROJECT_NAME=$(basename "$PROJECT_DIR")

# The repository to check for changes. Defaults to the project name
# but can be different for projects that e.g. use the sandbox image.
CHECK_REBUILD_REPOSITORY=${2:-$PROJECT_NAME}

# The services to restart. Defaults to the project name but can be different.
# May be a space-separated list of service names.
SERVICE_NAMES=${3:-$PROJECT_NAME}

# The terraform resources to taint. Defaults to none.
TO_TAINT=${4:-}

cd "$PROJECT_DIR"

# Bail out early if nothing changed since the last successful deployment.
CONTENT_HASH=$(calculate_content_hash "$CHECK_REBUILD_REPOSITORY")
echo "Last successfully deployed commit: $CONTENT_HASH"
if check_rebuild "cache-$CONTENT_HASH-$DEPLOY_TAG-deployed" "$CHECK_REBUILD_REPOSITORY"; then
  echo "No changes detected, skipping deployment."
  exit 0
fi

deploy_terraform "$PROJECT_NAME" ./terraform/ "$TO_TAINT"

# Restart services so they pick up the latest image.
# SERVICE_NAMES is intentionally unquoted to word-split a space-separated list.
for SERVICE in $SERVICE_NAMES; do
  deploy_service "$SERVICE"
done

# Tag the image as deployed so the next run can skip unchanged projects.
# Default DRY_DEPLOY to 0: it is only exported on "[ci dry-deploy]" commits,
# and an unset variable would abort the script here under `set -u`.
if [ "${DRY_DEPLOY:-0}" -eq 1 ]; then
  echo "DRY_DEPLOY: tag_remote_image $CHECK_REBUILD_REPOSITORY cache-$CONTENT_HASH cache-$CONTENT_HASH-$DEPLOY_TAG-deployed"
else
  retry tag_remote_image "$CHECK_REBUILD_REPOSITORY" "cache-$CONTENT_HASH" "cache-$CONTENT_HASH-$DEPLOY_TAG-deployed"
fi
10 changes: 6 additions & 4 deletions build-system/scripts/setup_env
Original file line number Diff line number Diff line change
Expand Up @@ -34,20 +34,22 @@ echo "PULL_REQUEST=$PULL_REQUEST"
# If the user has requested to perform a "dry deploy", we set the commit tag to fake version, and set DRY_DEPLOY to 1.
if [[ "$COMMIT_MESSAGE" == *"[ci dry-deploy]"* ]]; then
COMMIT_TAG=v999.999.999
DRY_DEPLOY=1
export DRY_DEPLOY=1
fi

if should_deploy; then
if [ -n "${COMMIT_TAG:-}" ]; then
# Extract the deploy env from the commit tag, if it has one, e.g. testnet.
# If we have one, we look something like v2.1.123-testnet.0. This is a "non production" release.
if [[ "$COMMIT_TAG" == *"-"* ]]; then
# Strips the trailing '.XX' from the end of the commit tag
# Strips the trailing '.XX' from the end of the commit tag.
TEMP=${COMMIT_TAG%.*}
# Strips the 'vX.Y.ZZZ-' from the front of the commit tag, leaving the e.g. 'testnet'
# Strips the 'vX.Y.ZZZ-' from the front of the commit tag, leaving the e.g. 'testnet'.
DEPLOY_ENV=${TEMP##*-}
else
elif [ ! "$DRY_DEPLOY" -eq 1 ]; then
DEPLOY_ENV=prod
else
DEPLOY_ENV=dev
fi
else
# If we're on master, this is our devnet.
Expand Down
2 changes: 1 addition & 1 deletion build-system/scripts/should_deploy
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# Right now, that's only if we're master.
set -eu

if [ "$BRANCH" == "master" ]; then
if [ "$BRANCH" == "master" ] || [ "$DRY_DEPLOY" -eq 1 ]; then
exit 0
else
exit 1
Expand Down
12 changes: 12 additions & 0 deletions build_manifest.yml
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,18 @@ aztec-faucet:
dependencies:
- yarn-project-prod

aztec-node:
buildDir: yarn-project
projectDir: yarn-project/aztec-node
dependencies:
- yarn-project-prod

p2p-bootstrap:
buildDir: yarn-project
projectDir: yarn-project/p2p-bootstrap
dependencies:
- yarn-project-prod

cli:
buildDir: yarn-project
projectDir: yarn-project/cli
Expand Down
2 changes: 1 addition & 1 deletion l1-contracts/REDEPLOY
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
# Append value to force redeploy
1
3
18 changes: 14 additions & 4 deletions l1-contracts/scripts/ci_deploy_contracts.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ echo "Last successfully published commit: $CONTENT_HASH"
# Check if image hash has already been deployed.
if check_rebuild "cache-$CONTENT_HASH-$DEPLOY_TAG-deployed" $REPOSITORY; then
echo "No changes detected, no contract deploy necessary."
# Set global variable for redeployment of contracts
echo export CONTRACTS_DEPLOYED=0 >>$BASH_ENV
exit 0
fi

Expand All @@ -31,8 +33,16 @@ for KEY in ROLLUP_CONTRACT_ADDRESS REGISTRY_CONTRACT_ADDRESS INBOX_CONTRACT_ADDR
export TF_VAR_$KEY=$VALUE
done

# Write TF state variables
deploy_terraform l1-contracts ./terraform
if [ -n "${DRY_DEPLOY:-}" ]; then
echo "DRY_DEPLOY: deploy_terraform l1-contracts ./terraform"
echo "DRY_DEPLOY: tag_remote_image $REPOSITORY cache-$CONTENT_HASH cache-$CONTENT_HASH-$DEPLOY_TAG-deployed"
else
# Write TF state variables
deploy_terraform l1-contracts ./terraform

# Tag the image as deployed.
retry tag_remote_image $REPOSITORY cache-$CONTENT_HASH cache-$CONTENT_HASH-$DEPLOY_TAG-deployed
# Tag the image as deployed.
retry tag_remote_image $REPOSITORY cache-$CONTENT_HASH cache-$CONTENT_HASH-$DEPLOY_TAG-deployed
fi

# Set global variable for redeployment of contracts
echo export CONTRACTS_DEPLOYED=1 >>$BASH_ENV
8 changes: 4 additions & 4 deletions yarn-project/aztec-faucet/terraform/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ resource "aws_ecs_task_definition" "aztec-faucet" {
container_definitions = <<DEFINITIONS
[
{
"name": "${var.DEPLOY_TAG}-faucet",
"name": "${var.DEPLOY_TAG}-aztec-faucet",
"image": "${var.DOCKERHUB_ACCOUNT}/aztec-faucet:${var.DEPLOY_TAG}",
"essential": true,
"memoryReservation": 3776,
Expand Down Expand Up @@ -151,7 +151,7 @@ DEFINITIONS
}

resource "aws_ecs_service" "aztec-faucet" {
name = "${var.DEPLOY_TAG}-faucet"
name = "${var.DEPLOY_TAG}-aztec-faucet"
cluster = data.terraform_remote_state.setup_iac.outputs.ecs_cluster_id
launch_type = "FARGATE"
desired_count = 1
Expand All @@ -169,13 +169,13 @@ resource "aws_ecs_service" "aztec-faucet" {

load_balancer {
target_group_arn = aws_alb_target_group.aztec-faucet.arn
container_name = "${var.DEPLOY_TAG}-faucet"
container_name = "${var.DEPLOY_TAG}-aztec-faucet"
container_port = 80
}

service_registries {
registry_arn = aws_service_discovery_service.aztec-faucet.arn
container_name = "${var.DEPLOY_TAG}-faucet"
container_name = "${var.DEPLOY_TAG}-aztec-faucet"
container_port = 80
}

Expand Down
2 changes: 1 addition & 1 deletion yarn-project/aztec-node/terraform/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ resource "aws_efs_file_system" "node_data_store" {
provisioned_throughput_in_mibps = 20

tags = {
Name = "${var.DEPLOY_TAG}-node-data"
Name = "${var.DEPLOY_TAG}-node-${count.index + 1}-data"
}

lifecycle_policy {
Expand Down
40 changes: 20 additions & 20 deletions yarn-project/p2p-bootstrap/terraform/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -54,15 +54,15 @@ locals {
}


resource "aws_cloudwatch_log_group" "aztec-bootstrap-log-group" {
resource "aws_cloudwatch_log_group" "p2p-bootstrap-log-group" {
count = local.bootnode_count
name = "/fargate/service/${var.DEPLOY_TAG}/aztec-bootstrap-${count.index + 1}"
name = "/fargate/service/${var.DEPLOY_TAG}/p2p-bootstrap-${count.index + 1}"
retention_in_days = 14
}

resource "aws_service_discovery_service" "aztec-bootstrap" {
resource "aws_service_discovery_service" "p2p-bootstrap" {
count = local.bootnode_count
name = "${var.DEPLOY_TAG}-aztec-bootstrap-${count.index + 1}"
name = "${var.DEPLOY_TAG}-p2p-bootstrap-${count.index + 1}"

health_check_custom_config {
failure_threshold = 1
Expand Down Expand Up @@ -91,9 +91,9 @@ resource "aws_service_discovery_service" "aztec-bootstrap" {
}
}

resource "aws_ecs_task_definition" "aztec-bootstrap" {
resource "aws_ecs_task_definition" "p2p-bootstrap" {
count = local.bootnode_count
family = "${var.DEPLOY_TAG}-aztec-bootstrap-${count.index + 1}"
family = "${var.DEPLOY_TAG}-p2p-bootstrap-${count.index + 1}"
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
cpu = "2048"
Expand All @@ -104,7 +104,7 @@ resource "aws_ecs_task_definition" "aztec-bootstrap" {
container_definitions = <<DEFINITIONS
[
{
"name": "${var.DEPLOY_TAG}-aztec-bootstrap-${count.index + 1}",
"name": "${var.DEPLOY_TAG}-p2p-bootstrap-${count.index + 1}",
"image": "${var.DOCKERHUB_ACCOUNT}/aztec-sandbox:${var.DEPLOY_TAG}",
"essential": true,
"command": ["start"],
Expand Down Expand Up @@ -154,7 +154,7 @@ resource "aws_ecs_task_definition" "aztec-bootstrap" {
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/fargate/service/${var.DEPLOY_TAG}/aztec-bootstrap-${count.index + 1}",
"awslogs-group": "/fargate/service/${var.DEPLOY_TAG}/p2p-bootstrap-${count.index + 1}",
"awslogs-region": "eu-west-2",
"awslogs-stream-prefix": "ecs"
}
Expand All @@ -164,9 +164,9 @@ resource "aws_ecs_task_definition" "aztec-bootstrap" {
DEFINITIONS
}

resource "aws_ecs_service" "aztec-bootstrap" {
resource "aws_ecs_service" "p2p-bootstrap" {
count = local.bootnode_count
name = "${var.DEPLOY_TAG}-aztec-bootstrap-${count.index + 1}"
name = "${var.DEPLOY_TAG}-p2p-bootstrap-${count.index + 1}"
cluster = data.terraform_remote_state.setup_iac.outputs.ecs_cluster_id
launch_type = "FARGATE"
desired_count = 1
Expand All @@ -183,23 +183,23 @@ resource "aws_ecs_service" "aztec-bootstrap" {
}

service_registries {
registry_arn = aws_service_discovery_service.aztec-bootstrap[count.index].arn
container_name = "${var.DEPLOY_TAG}-aztec-bootstrap-${count.index + 1}"
registry_arn = aws_service_discovery_service.p2p-bootstrap[count.index].arn
container_name = "${var.DEPLOY_TAG}-p2p-bootstrap-${count.index + 1}"
container_port = 80
}

load_balancer {
target_group_arn = aws_lb_target_group.aztec-bootstrap-target-group[count.index].id
container_name = "${var.DEPLOY_TAG}-aztec-bootstrap-${count.index + 1}"
target_group_arn = aws_lb_target_group.p2p-bootstrap-target-group[count.index].id
container_name = "${var.DEPLOY_TAG}-p2p-bootstrap-${count.index + 1}"
container_port = var.BOOTNODE_LISTEN_PORT + count.index
}

task_definition = aws_ecs_task_definition.aztec-bootstrap[count.index].family
task_definition = aws_ecs_task_definition.p2p-bootstrap[count.index].family
}

resource "aws_lb_target_group" "aztec-bootstrap-target-group" {
resource "aws_lb_target_group" "p2p-bootstrap-target-group" {
count = local.bootnode_count
name = "aztec-bootstrap-${count.index + 1}-target-group"
name = "p2p-bootstrap-${count.index + 1}-target-group"
port = var.BOOTNODE_LISTEN_PORT + count.index
protocol = "TCP"
target_type = "ip"
Expand All @@ -224,18 +224,18 @@ resource "aws_security_group_rule" "allow-bootstrap-tcp" {
security_group_id = data.terraform_remote_state.aztec-network_iac.outputs.p2p_security_group_id
}

resource "aws_lb_listener" "aztec-bootstrap-tcp-listener" {
resource "aws_lb_listener" "p2p-bootstrap-tcp-listener" {
count = local.bootnode_count
load_balancer_arn = data.terraform_remote_state.aztec-network_iac.outputs.nlb_arn
port = var.BOOTNODE_LISTEN_PORT + count.index
protocol = "TCP"

tags = {
name = "aztec-bootstrap-${count.index}-target-group"
name = "p2p-bootstrap-${count.index}-target-group"
}

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.aztec-bootstrap-target-group[count.index].arn
target_group_arn = aws_lb_target_group.p2p-bootstrap-target-group[count.index].arn
}
}