---

# Format Ref: https://cirrus-ci.org/guide/writing-tasks/

# Main collection of env. vars to set for all tasks and scripts.
env:
    # Actual|intended branch for this run
    DEST_BRANCH: "main"
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"
    # Location where source repo. will be cloned
    CIRRUS_WORKING_DIR: "/var/tmp/aardvark-dns"
    # Rust package cache also lives here
    CARGO_HOME: "/var/cache/cargo"
    # Rust compiler output lives here (see Makefile)
    CARGO_TARGET_DIR: "$CIRRUS_WORKING_DIR/targets"
    # Testing depends on the latest netavark binary from upstream CI
    NETAVARK_BRANCH: "main"
    NETAVARK_URL: "https://api.cirrus-ci.com/v1/artifact/github/containers/netavark/success/binary.zip?branch=${NETAVARK_BRANCH}"
    # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
    SCRIPT_BASE: "./contrib/cirrus"
    IMAGE_SUFFIX: "c20241107t210000z-f41f40d13"
    FEDORA_NETAVARK_IMAGE: "fedora-netavark-${IMAGE_SUFFIX}"
    FEDORA_NETAVARK_AMI: "fedora-netavark-aws-arm64-${IMAGE_SUFFIX}"
    EC2_INST_TYPE: "t4g.xlarge"
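
# Illustrative sketch only: the netavark artifact referenced by NETAVARK_URL
# above can also be fetched by hand for local debugging (in CI, the scripts
# under $SCRIPT_BASE are expected to handle the download):
#   curl --fail --location -o /tmp/netavark.zip "https://api.cirrus-ci.com/v1/artifact/github/containers/netavark/success/binary.zip?branch=main"
#   unzip /tmp/netavark.zip -d /tmp/netavark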
gcp_credentials: ENCRYPTED[f6a0e4101418bec8180783b208721fc990772817364fed0346f5fd126bf0cfca03738dd8c7fb867944637a1eac7cec37]
aws_credentials: ENCRYPTED[3fab904a98355f84b0bac084f8a50428ff8a27dd2b6a6c42fca77df89010e620a1da3cd246a50b1074e6787c42818080]

build_task:
    alias: "build"
    # Compiling is very CPU intensive, make it chooch quicker for this task only
    gce_instance: &standard_build_gce_x86_64
        image_project: "libpod-218412"
        zone: "us-central1-c"
        disk: 200  # GB, do not set <200 per gcloud warning re: I/O performance
        cpu: 8
        memory: "8Gb"
        image_name: "${FEDORA_NETAVARK_IMAGE}"
    cargo_cache: &cargo_cache
        folder: "$CARGO_HOME"
        fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    targets_cache: &targets_cache
        folder: "$CARGO_TARGET_DIR"
        fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    bin_cache: &bin_cache
        # This simply prevents rebuilding bin/aardvark-dns* for every subsequent task.
        folder: "$CIRRUS_WORKING_DIR/bin"
        fingerprint_key: "bin_v1_${CIRRUS_BUILD_ID}"  # Cache only within same build
        reupload_on_changes: true
    setup_script: &setup "$SCRIPT_BASE/setup.sh $CIRRUS_TASK_NAME"
    main_script: &main "$SCRIPT_BASE/runner.sh $CIRRUS_TASK_NAME"
    # N/B: This script comes from `main` on the netavark repo
    cache_grooming_script: &groom bash "$SCRIPT_BASE/netavark_cache_groom.sh"
    upload_caches: [ "cargo", "targets", "bin" ]
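
# For reference: the fingerprint_script output above serves only as a cache
# key.  With Cargo.lock and Cargo.toml present in the working directory and
# DEST_BRANCH=main, the cargo_cache key expands to roughly:
#   cargo_v3_main_amd64
#   ---
#   <contents of Cargo.lock>
#   ---
#   <contents of Cargo.toml>
# so any dependency change invalidates the cache, while unrelated commits reuse it.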

build_aarch64_task:
    alias: "build_aarch64"
    # Compiling is very CPU intensive, make it chooch quicker for this task only
    ec2_instance: &standard_build_ec2_aarch64
        image: "$FEDORA_NETAVARK_AMI"
        type: $EC2_INST_TYPE
        region: us-east-1
        architecture: arm64  # CAUTION: This has to be "arm64", not aarch64.
    cargo_cache: &cargo_cache_aarch64
        <<: *cargo_cache
        fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
    targets_cache: &targets_cache_aarch64
        <<: *targets_cache
        fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
    bin_cache: &bin_cache_aarch64
        <<: *bin_cache
        fingerprint_key: "cargo_v1_${DEST_BRANCH}_aarch64"
    setup_script: *setup
    main_script: *main
    cache_grooming_script: *groom
    upload_caches: [ "cargo", "targets", "bin" ]
    # Downstream CI needs the aarch64 binaries from this CI system.
    # However, we don't want to confuse architectures.
    art_prep_script:
        - cd bin
        - ls -la
        - mv aardvark-dns aardvark-dns.$(uname -m)-unknown-linux-gnu
        - mv aardvark-dns.debug aardvark-dns.debug.$(uname -m)-unknown-linux-gnu
        - mv aardvark-dns.info aardvark-dns.info.$(uname -m)-unknown-linux-gnu
    armbinary_artifacts:  # See success_task
        path: ./bin/aardvark-dns*
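
# On the arm64 instance used here, $(uname -m) reports "aarch64", so after
# art_prep_script the armbinary artifact contains:
#   aardvark-dns.aarch64-unknown-linux-gnu
#   aardvark-dns.debug.aarch64-unknown-linux-gnu
#   aardvark-dns.info.aarch64-unknown-linux-gnu
# These are exactly the names success_task later fetches (via
# ${API_URL_BASE}/build_aarch64/armbinary.zip) and checks against EXP_BINS.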

validate_task:
    alias: "validate"
    depends_on:
        - "build"
    gce_instance: &standard_gce_x86_64
        <<: *standard_build_gce_x86_64
        cpu: 2
        memory: "4Gb"
    # From this point forward, all caches become read-only for this run.
    cargo_cache: &ro_cargo_cache
        <<: *cargo_cache
        reupload_on_changes: false
    targets_cache: &ro_targets_cache
        <<: *targets_cache
        reupload_on_changes: false
    bin_cache: &ro_bin_cache
        <<: *bin_cache
        reupload_on_changes: false
    setup_script: *setup
    main_script: *main

validate_aarch64_task:
    alias: "validate_aarch64"
    depends_on:
        - "build_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    # From this point forward, all caches become read-only for this run.
    cargo_cache: &ro_cargo_cache_aarch64
        <<: *cargo_cache_aarch64
        reupload_on_changes: false
    targets_cache: &ro_targets_cache_aarch64
        <<: *targets_cache_aarch64
        reupload_on_changes: false
    bin_cache: &ro_bin_cache_aarch64
        <<: *bin_cache_aarch64
        reupload_on_changes: false
    setup_script: *setup
    main_script: *main

unit_task:
    alias: "unit"
    depends_on:
        - "build"  # Run in parallel with validate to save some time
    gce_instance: *standard_gce_x86_64
    cargo_cache: *ro_cargo_cache
    targets_cache: *ro_targets_cache
    bin_cache: *ro_bin_cache
    setup_script: *setup
    main_script: *main

unit_aarch64_task:
    alias: "unit_aarch64"
    depends_on:
        - "build_aarch64"  # Run in parallel with validate to save some time
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: *ro_cargo_cache_aarch64
    targets_cache: *ro_targets_cache_aarch64
    bin_cache: *ro_bin_cache_aarch64
    setup_script: *setup
    main_script: *main

integration_task:
    alias: "integration"
    depends_on:
        - "unit"
    gce_instance: *standard_gce_x86_64
    cargo_cache: *ro_cargo_cache
    targets_cache: *ro_targets_cache
    bin_cache: *ro_bin_cache
    setup_script: *setup
    main_script: *main

integration_aarch64_task:
    alias: "integration_aarch64"
    depends_on:
        - "unit_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: *ro_cargo_cache_aarch64
    targets_cache: *ro_targets_cache_aarch64
    bin_cache: *ro_bin_cache_aarch64
    setup_script: *setup
    main_script: *main
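
# Dependency flow implied by the depends_on entries, per architecture:
# build -> validate and unit (in parallel) -> integration.  The meta task below
# runs independently, msrv_build also depends on build, and the final success
# task waits for all of them.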

# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    alias: meta
    name: "VM img. keepalive"
    container:
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: "${FEDORA_NETAVARK_IMAGE}"
        EC2IMGNAMES: "$FEDORA_NETAVARK_AMI"
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        AWSINI: ENCRYPTED[94661de0a481bfa6757f6ef26f896c28578c764f00364a293871b7576337c947304e2dfa35e3819c066057ef3957237f]
        GCPJSON: ENCRYPTED[4c8f37db84c8afb3d67932ebbf1f062e5e7e54b64e9f99624d96d828d2b8677624fb1470a9b12c097e06afeb11fb8c4e]
        GCPNAME: ENCRYPTED[1d96b7a11a12abe142a2e6f5a97ff6cca2bdcbe73724d560d9a2d339b7323b911c814d814057e19932f9d339e9a4c929]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR  # source not needed
    script: /usr/local/bin/entrypoint.sh

msrv_build_task:
    alias: msrv_build
    depends_on:
        - "build"
    gce_instance: *standard_gce_x86_64
    container:
        cpu: 2
        memory: 2
        # When bumping the image always remember to update the README MSRV as well.
        image: quay.io/libpod/nv-rust:1.76
    script:
        - make build
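
# A minimal local sanity check for the MSRV pinned by the image tag above
# (assuming the 1.76 toolchain matches what the nv-rust image ships):
#   rustup toolchain install 1.76
#   cargo +1.76 build
# Keep the README's documented MSRV in sync whenever this image tag changes.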

success_task:
    alias: "success"
    gce_instance: *standard_gce_x86_64
    name: "Total success"
    depends_on:
        - "build"
        - "build_aarch64"
        - "validate"
        - "validate_aarch64"
        - "unit"
        - "unit_aarch64"
        - "integration"
        - "integration_aarch64"
        - "meta"
        - "msrv_build"
    env:
        API_URL_BASE: "https://api.cirrus-ci.com/v1/artifact/build/${CIRRUS_BUILD_ID}"
        # FAIL the task if any expected binary flavor is missing
        EXP_BINS: >-
            aardvark-dns
            aardvark-dns.debug
            aardvark-dns.info
            aardvark-dns.aarch64-unknown-linux-gnu
            aardvark-dns.debug.aarch64-unknown-linux-gnu
            aardvark-dns.info.aarch64-unknown-linux-gnu
    bin_cache: *ro_bin_cache
    clone_script: *noop
    # The paths used for uploaded artifacts are relative here and in Cirrus
    artifacts_prep_script:
        - set -x
        - curl --fail --location -o /tmp/armbinary.zip ${API_URL_BASE}/build_aarch64/armbinary.zip
        - unzip /tmp/armbinary.zip
        - mv bin/* ./
        - rm -rf bin
    artifacts_test_script:  # Other CI systems depend on all files being present
        - ls -la
        # If there's a missing file, show what it was in the output
        - for fn in $EXP_BINS; do [[ -r "$(echo $fn|tee /dev/stderr)" ]] || exit 1; done
    # Upload tested binary for consumption downstream
    # https://cirrus-ci.org/guide/writing-tasks/#artifacts-instruction
    binary_artifacts:
        path: ./aardvark-dns*
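
# Sketch for downstream consumers: the binaries verified above should be
# retrievable the same way this file retrieves netavark's, assuming this repo
# lives at containers/aardvark-dns and the same Cirrus artifact URL scheme
# applies:
#   curl --fail --location -o binary.zip "https://api.cirrus-ci.com/v1/artifact/github/containers/aardvark-dns/success/binary.zip?branch=main"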