docker_fast.sh (forked from AztecProtocol/aztec-packages)
#!/usr/bin/env bash
# TODO: eventually rename this to docker.sh once we've moved to it entirely
set -eux
function start_minio() {
  if nc -z 127.0.0.1 12000 &>/dev/null; then
    # Already started
    return
  fi
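  # MinIO serves the S3 API on container port 9000 (mapped to host port 12000)
  # and its web console on 12001; data persists in the named minio-data volume.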
  docker run -d -p 12000:9000 -p 12001:12001 -v minio-data:/data \
    quay.io/minio/minio server /data --console-address ":12001"
  # Make our cache bucket
  AWS_ACCESS_KEY_ID="minioadmin" AWS_SECRET_ACCESS_KEY="minioadmin" aws --endpoint-url http://localhost:12000 s3 mb s3://aztec-ci-artifacts 2>/dev/null || true
}
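# To inspect the local cache bucket by hand, something like the following works
# (a sketch, assuming the default minioadmin credentials used above):
#   AWS_ACCESS_KEY_ID=minioadmin AWS_SECRET_ACCESS_KEY=minioadmin \
#     aws --endpoint-url http://localhost:12000 s3 ls s3://aztec-ci-artifacts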
S3_BUILD_CACHE_UPLOAD=${S3_BUILD_CACHE_UPLOAD:-false}
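# Build containers cannot reach the host's 127.0.0.1, so point them at the
# host's first LAN address instead (assumes a typical Linux Docker network setup).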
S3_BUILD_CACHE_MINIO_URL="http://$(hostname -I | awk '{print $1}'):12000"
# Start the local MinIO server as a quicker cache layer
start_minio
if ! git diff-index --quiet HEAD --; then
  echo "Warning: You have uncommitted changes. Disabling S3 and local MinIO caching to avoid polluting the cache (which is keyed on Git data)." >&2
  S3_BUILD_CACHE_UPLOAD=false
  S3_BUILD_CACHE_DOWNLOAD=false
  S3_BUILD_CACHE_MINIO_URL=""
  echo "Fatal: For now this is a fatal error, as building without the cache would defeat the purpose of 'fast'." >&2
  exit 1
elif [ -n "${AWS_ACCESS_KEY_ID:-}" ]; then
  S3_BUILD_CACHE_DOWNLOAD=true
elif [ -f ~/.aws/credentials ]; then
  # Retrieve credentials if available in AWS config
  AWS_ACCESS_KEY_ID=$(aws configure get default.aws_access_key_id)
  AWS_SECRET_ACCESS_KEY=$(aws configure get default.aws_secret_access_key)
  S3_BUILD_CACHE_DOWNLOAD=true
else
  S3_BUILD_CACHE_UPLOAD=false
  S3_BUILD_CACHE_DOWNLOAD=false
fi
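# Net effect: with AWS credentials (from the environment or ~/.aws) the S3
# cache is downloaded, and uploaded only when S3_BUILD_CACHE_UPLOAD=true;
# without credentials S3 is skipped while the local MinIO cache is still used.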
TMP=$(mktemp -d)
function on_exit() {
  rm -rf "$TMP"
}
trap on_exit EXIT
# BuildKit file-based secrets need a file source, so save each secret
# environment variable into its own file under $TMP (cleaned up by the trap above)
echo "${AWS_ACCESS_KEY_ID:-}" > "$TMP/aws_access_key_id.txt"
echo "${AWS_SECRET_ACCESS_KEY:-}" > "$TMP/aws_secret_access_key.txt"
echo "${S3_BUILD_CACHE_MINIO_URL:-}" > "$TMP/s3_build_cache_minio_url.txt"
echo "${S3_BUILD_CACHE_UPLOAD:-}" > "$TMP/s3_build_cache_upload.txt"
echo "${S3_BUILD_CACHE_DOWNLOAD:-}" > "$TMP/s3_build_cache_download.txt"
cd "$(git rev-parse --show-toplevel)"
PROJECTS=(
  barretenberg
  build-system
  noir
  l1-contracts
  avm-transpiler
  noir-projects
  yarn-project
)
for project in "${PROJECTS[@]}"; do
  # Archive the Git-tracked files of each project into a tar.gz file
  git archive --format=tar.gz -o "$TMP/$project.tar.gz" HEAD "$project"
done
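# git archive reads from HEAD, so only committed files reach the build context;
# this pairs with the dirty-tree check above.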
# Run the Docker build with $TMP (holding the per-project archives) as the
# context, passing each secret as a BuildKit secret mount
DOCKER_BUILDKIT=1 docker build -t aztecprotocol/aztec -f Dockerfile.fast --progress=plain \
  --secret id=aws_access_key_id,src="$TMP/aws_access_key_id.txt" \
  --secret id=aws_secret_access_key,src="$TMP/aws_secret_access_key.txt" \
  --secret id=s3_build_cache_minio_url,src="$TMP/s3_build_cache_minio_url.txt" \
  --secret id=s3_build_cache_upload,src="$TMP/s3_build_cache_upload.txt" \
  --secret id=s3_build_cache_download,src="$TMP/s3_build_cache_download.txt" \
  "$TMP"