relocation.py
from __future__ import annotations
import logging
from collections.abc import Generator
from contextlib import contextmanager
from enum import Enum, unique
from functools import lru_cache
from string import Template
from typing import Any
from uuid import UUID
from django.utils import timezone
from sentry import options
from sentry.backup.dependencies import dependencies, get_model_name, sorted_dependencies
from sentry.backup.helpers import Printer
from sentry.backup.scopes import RelocationScope
from sentry.http import get_server_hostname
from sentry.models.files.utils import get_relocation_storage
from sentry.models.relocation import Relocation, RelocationFile
from sentry.users.services.user.service import user_service
from sentry.utils.email.message_builder import MessageBuilder as MessageBuilder
logger = logging.getLogger("sentry.relocation.tasks")
# Relocation tasks are always performed in sequential order. We can leverage this to check for any
# weird out-of-order executions.
@unique
class OrderedTask(Enum):
# Note: the numerical values should always be in execution order (ie, the order the tasks should
# be completed in). It is safe to edit the numbers assigned to any given task, as we only store
# tasks in the database by name.
NONE = 0
UPLOADING_START = 1
UPLOADING_COMPLETE = 2
PREPROCESSING_SCAN = 3
PREPROCESSING_TRANSFER = 4
PREPROCESSING_BASELINE_CONFIG = 5
PREPROCESSING_COLLIDING_USERS = 6
PREPROCESSING_COMPLETE = 7
VALIDATING_START = 8
VALIDATING_POLL = 9
VALIDATING_COMPLETE = 10
IMPORTING = 11
POSTPROCESSING = 12
NOTIFYING_UNHIDE = 13
NOTIFYING_USERS = 14
NOTIFYING_OWNER = 15
COMPLETED = 16
# Match each `OrderedTask` to the `Relocation.Step` it is part of.
TASK_TO_STEP: dict[OrderedTask, Relocation.Step] = {
OrderedTask.NONE: Relocation.Step.UNKNOWN,
OrderedTask.UPLOADING_START: Relocation.Step.UPLOADING,
OrderedTask.UPLOADING_COMPLETE: Relocation.Step.UPLOADING,
OrderedTask.PREPROCESSING_SCAN: Relocation.Step.PREPROCESSING,
OrderedTask.PREPROCESSING_TRANSFER: Relocation.Step.PREPROCESSING,
OrderedTask.PREPROCESSING_BASELINE_CONFIG: Relocation.Step.PREPROCESSING,
OrderedTask.PREPROCESSING_COLLIDING_USERS: Relocation.Step.PREPROCESSING,
OrderedTask.PREPROCESSING_COMPLETE: Relocation.Step.PREPROCESSING,
OrderedTask.VALIDATING_START: Relocation.Step.VALIDATING,
OrderedTask.VALIDATING_POLL: Relocation.Step.VALIDATING,
OrderedTask.VALIDATING_COMPLETE: Relocation.Step.VALIDATING,
OrderedTask.IMPORTING: Relocation.Step.IMPORTING,
OrderedTask.POSTPROCESSING: Relocation.Step.POSTPROCESSING,
OrderedTask.NOTIFYING_UNHIDE: Relocation.Step.NOTIFYING,
OrderedTask.NOTIFYING_USERS: Relocation.Step.NOTIFYING,
OrderedTask.NOTIFYING_OWNER: Relocation.Step.NOTIFYING,
OrderedTask.COMPLETED: Relocation.Step.COMPLETED,
}
assert set(OrderedTask._member_map_.keys()) == {k.name for k in TASK_TO_STEP.keys()}
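# Illustrative note (not used at runtime): because the enum values above are in execution order,
# the task preceding any given task can be derived arithmetically, e.g.
# `OrderedTask(OrderedTask.VALIDATING_POLL.value - 1) is OrderedTask.VALIDATING_START`, and its
# step looked up via `TASK_TO_STEP[OrderedTask.VALIDATING_POLL] == Relocation.Step.VALIDATING`.
# This is exactly what `start_relocation_task` below relies on when checking task ordering.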
# The file type for a relocation export tarball of any kind.
RELOCATION_FILE_TYPE = "relocation.file"
# Relocation input files are uploaded as tarballs, and chunked and stored using the normal
# `File`/`AbstractFile` mechanism, which has a hard limit of 2GiB, because we need to represent the
# offset into it as a 32-bit int. This means that the largest tarball we are able to import at this
# time is 2GiB. When validating this tarball, we will need to make a "composite object" from the
# uploaded blobs in Google Cloud Storage, which has a limit of 32 components. Thus, we get our blob
# size of the maximum overall file size (2GiB) divided by the maximum number of blobs (32): 64MiB
# per blob.
#
# Note that the actual production file size limit, set by uwsgi, is currently 209715200 bytes, or
# ~200MB, so we should never see more than ~4 blobs in practice.
RELOCATION_BLOB_SIZE = int((2**31) / 32)
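# Worked example of the arithmetic above (reference only): 2**31 bytes / 32 components is
# 67,108,864 bytes, i.e. 64MiB per blob. Against the ~200MB production upload limit, a maximal
# tarball therefore spans ceil(209715200 / 67108864) = 4 blobs, matching the note above.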
# Create the relevant directories: a `/workspace/in` directory containing the inputs that will
# be imported, a `/workspace/out` directory for exports that will be generated, and
# `/workspace/findings` for findings.
#
# TODO(getsentry/team-ospo#190): Make `get-self-hosted-repo` pull a pinned version, not
# mainline.
#
# TODO(getsentry/team-ospo#216): Use script in self-hosted to completely flush db instead of
# truncating tables.
CLOUDBUILD_YAML_TEMPLATE = Template(
"""
steps:
##############################
### Setup steps
##############################
- name: "gcr.io/cloud-builders/gsutil"
id: copy-inputs-being-validated
waitFor: ["-"]
args: ["cp", "-r", "$bucket_root/runs/$uuid/in", "."]
timeout: 600s
- name: "gcr.io/cloud-builders/docker"
id: create-working-dirs
waitFor: ["-"]
entrypoint: "bash"
args:
- "-e"
- "-c"
- |
mkdir /workspace/out && chmod 777 /workspace/out
mkdir /workspace/findings && chmod 777 /workspace/findings
echo '[]' > /workspace/findings/null.json
timeout: 15s
- name: "gcr.io/cloud-builders/docker"
id: get-self-hosted-repo
waitFor: ["-"]
entrypoint: "bash"
args:
- "-e"
- "-c"
- |
mkdir self-hosted && cd self-hosted
curl -L "https://github.com/getsentry/self-hosted/archive/$self_hosted_version.tar.gz" | tar xzf - --strip-components=1
echo '{"version": "3.4", "networks":{"default":{"external":{"name":"cloudbuild"}}}}' > docker-compose.override.yml
timeout: 120s
- name: "gcr.io/cloud-builders/docker"
id: run-install-script
waitFor:
- get-self-hosted-repo
entrypoint: "bash"
dir: self-hosted
args:
- "-e"
- "-c"
- |
./install.sh --skip-commit-check --skip-user-creation
timeout: 600s
- name: "gcr.io/cloud-builders/docker"
id: instance-ready
waitFor:
- run-install-script
args:
$docker_compose_cmd
- "up"
- "-d"
timeout: 900s
- name: "gcr.io/cloud-builders/docker"
id: clear-database
waitFor:
- instance-ready
args:
$docker_compose_cmd
- "exec"
- "-T"
- "postgres"
- "psql"
- "-U"
- "postgres"
- "-c"
- "TRUNCATE $truncate_tables RESTART IDENTITY CASCADE;"
timeout: 30s
##############################
### Validation steps
##############################
$validation_steps
artifacts:
objects:
location: "$bucket_root/runs/$uuid/findings/"
paths: ["/workspace/findings/**"]
timeout: 3600s
options:
machineType: "N1_HIGHCPU_32"
env:
- "REPORT_SELF_HOSTED_ISSUES=0"
tags: ["cloud-builders-community"]
"""
)
IMPORT_VALIDATION_STEP_TEMPLATE = Template(
"""
- name: "gcr.io/cloud-builders/docker"
id: import-$kind
waitFor:
- copy-inputs-being-validated
- create-working-dirs
- clear-database
$wait_for
args:
$docker_compose_cmd
$docker_compose_run
- "-v"
- "/workspace/in:/in"
- "-v"
- "/workspace/findings:/findings"
- "web"
- "import"
- "$scope"
- "/in/$tarfile"
- "--decrypt-with-gcp-kms"
- "/in/kms-config.json"
- "--findings-file"
- "/findings/import-$jsonfile"
$args
timeout: $timeout
"""
)
EXPORT_VALIDATION_STEP_TEMPLATE = Template(
"""
- name: "gcr.io/cloud-builders/docker"
id: export-$kind
waitFor:
- import-$kind
$wait_for
args:
$docker_compose_cmd
$docker_compose_run
- "-v"
- "/workspace/in:/in"
- "-v"
- "/workspace/out:/out"
- "-v"
- "/workspace/findings:/findings"
- "-e"
- "SENTRY_LOG_LEVEL=CRITICAL"
- "web"
- "export"
- "$scope"
- "/out/$tarfile"
- "--encrypt-with-gcp-kms"
- "/in/kms-config.json"
- "--findings-file"
- "/findings/export-$jsonfile"
$args
timeout: $timeout
"""
)
COPY_OUT_DIR_TEMPLATE = Template(
"""
- name: 'gcr.io/cloud-builders/gsutil'
id: copy-out-dir
waitFor:
$wait_for
args:
- 'cp'
- '-r'
- '/workspace/out'
- '$bucket_root/runs/$uuid/out'
timeout: 30s
"""
)
COMPARE_VALIDATION_STEP_TEMPLATE = Template(
"""
- name: "gcr.io/cloud-builders/docker"
id: compare-$kind
waitFor:
- export-$kind
$wait_for
args:
$docker_compose_cmd
$docker_compose_run
- "-v"
- "/workspace/in:/in"
- "-v"
- "/workspace/out:/out"
- "-v"
- "/workspace/findings:/findings"
- "web"
- "backup"
- "compare"
- "/in/$tarfile"
- "/out/$tarfile"
- "--decrypt-left-with-gcp-kms"
- "/in/kms-config.json"
- "--decrypt-right-with-gcp-kms"
- "/in/kms-config.json"
- "--findings-file"
- "/findings/compare-$jsonfile"
$args
timeout: $timeout
"""
)
# A custom logger that roughly matches the parts of the `click.echo` interface that the
# `import_*` methods rely on.
class LoggingPrinter(Printer):
def __init__(self, uuid: UUID):
self.uuid = uuid
super().__init__()
def echo(
self,
text: str,
*,
err: bool = False,
color: bool | None = None,
) -> None:
if err:
logger.error(
"Import failed: %s",
text,
extra={"uuid": str(self.uuid), "task": OrderedTask.IMPORTING.name},
)
else:
logger.info(
"Import info: %s",
text,
extra={"uuid": str(self.uuid), "task": OrderedTask.IMPORTING.name},
)
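# Usage sketch (assumed call site, not defined in this module): the importing task would pass an
# instance of this printer into the backup import helpers so their progress output lands in the
# task logs, e.g. something like:
#
#   import_in_organization_scope(..., printer=LoggingPrinter(relocation.uuid))
#
# The exact import function and its signature are assumptions here; only `LoggingPrinter` itself
# is defined in this file.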
def send_relocation_update_email(
relocation: Relocation, email_kind: Relocation.EmailKind, args: dict[str, Any]
) -> None:
name = str(email_kind.name)
name_lower = name.lower()
msg = MessageBuilder(
subject=f"{options.get('mail.subject-prefix')} Your Relocation has {name.capitalize()}",
template=f"sentry/emails/relocation_{name_lower}.txt",
html_template=f"sentry/emails/relocation_{name_lower}.html",
type=f"relocation.{name_lower}",
context={"domain": get_server_hostname(), "datetime": timezone.now(), **args},
)
email_to = []
owner = user_service.get_user(user_id=relocation.owner_id)
if owner is not None:
email_to.append(owner.email)
if relocation.owner_id != relocation.creator_id:
creator = user_service.get_user(user_id=relocation.creator_id)
if creator is not None:
email_to.append(creator.email)
msg.send_async(to=email_to)
relocation.latest_notified = email_kind.value
relocation.save()
def start_relocation_task(
uuid: UUID, task: OrderedTask, allowed_task_attempts: int
) -> tuple[Relocation | None, int]:
"""
All tasks for relocation are done sequentially, and take the UUID of the `Relocation` model as
the input. We can leverage this information to do some common pre-task setup.
Returns a tuple of relocation model and the number of attempts remaining for this task.
"""
logger_data = {"uuid": str(uuid)}
try:
relocation: Relocation = Relocation.objects.get(uuid=uuid)
except Relocation.DoesNotExist:
logger.exception("Could not locate Relocation model by UUID: %s", uuid)
return (None, 0)
if relocation.status not in {
Relocation.Status.IN_PROGRESS.value,
Relocation.Status.PAUSE.value,
}:
logger.warning(
"Relocation has already completed as `%s`",
Relocation.Status(relocation.status),
extra=logger_data,
)
return (None, 0)
try:
prev_task_name = "" if task.value == 1 else OrderedTask(task.value - 1).name
except Exception:
logger.exception("Attempted to execute unknown relocation task", extra=logger_data)
fail_relocation(relocation, OrderedTask.NONE)
return (None, 0)
logger_data["task"] = task.name
if relocation.latest_task not in {prev_task_name, task.name}:
logger.error(
"Task %s tried to follow %s which is the wrong order",
task.name,
relocation.latest_task,
extra=logger_data,
)
fail_relocation(relocation, task)
return (None, 0)
if relocation.latest_task == task.name:
# It is possible for a task to have been scheduled even when all of its attempts have been
# exhausted due to some tasks using `acks_late`, causing them to be retried in the event of
# a worker-wide SIGKILL/TERM/QUIT. This check catches such scenarios on the retry, and
# gracefully marks the task as failed before exiting.
if relocation.latest_task_attempts >= allowed_task_attempts:
logger.error(
"Task %s has exhausted all of its attempts",
task.name,
extra=logger_data,
)
fail_relocation(relocation, task)
return (None, 0)
relocation.latest_task_attempts += 1
else:
relocation.latest_task = task.name
relocation.latest_task_attempts = 1
step = TASK_TO_STEP[task]
is_new_step = relocation.step + 1 == step.value
at_scheduled_cancel = is_new_step and relocation.scheduled_cancel_at_step == step.value
if at_scheduled_cancel:
logger.info("Task aborted due to relocation cancellation request", extra=logger_data)
relocation.step = step.value
relocation.status = Relocation.Status.FAILURE.value
relocation.scheduled_pause_at_step = None
relocation.scheduled_cancel_at_step = None
relocation.failure_reason = "This relocation was cancelled by an administrator."
relocation.save()
return (None, 0)
# TODO(getsentry/team-ospo#216): Add an option like 'relocation:autopause-at-steps', which will
# be an array of steps that we want relocations to automatically pause at. Will be useful once
# we have self-serve relocations, and want a means by which to check their validity (bugfixes,
# etc).
at_scheduled_pause = is_new_step and relocation.scheduled_pause_at_step == step.value
if relocation.status == Relocation.Status.PAUSE.value or at_scheduled_pause:
logger.info("Task aborted due to relocation pause", extra=logger_data)
# Pause the relocation. We will not be able to pause at this step again once we restart.
relocation.step = step.value
relocation.status = Relocation.Status.PAUSE.value
relocation.scheduled_pause_at_step = None
relocation.save()
return (None, 0)
relocation.step = step.value
relocation.save()
logger.info("Task started", extra=logger_data)
return (relocation, allowed_task_attempts - relocation.latest_task_attempts)
def fail_relocation(relocation: Relocation, task: OrderedTask, reason: str = "") -> None:
"""
Helper function that conveniently fails a relocation celery task in such a way that the failure
reason is recorded for the user and no further retries occur. It should be used like:
>>> relocation = Relocation.objects.get(...)
>>> if failure_condition:
>>> fail_relocation(relocation, OrderedTask.IMPORTING, "Some user-friendly reason why this failed.")
>>> return # Always exit the task immediately upon failure
This function is ideal for non-transient failures, where we know there is no need to retry
because the result won't change, like invalid input data or conclusive validation results. For
transient failures where retrying at a later time may be useful, use
`retry_task_or_fail_relocation` instead.
"""
# Another nested exception handler could have already failed this relocation - in this case, do
# nothing.
if relocation.status == Relocation.Status.FAILURE.value:
return
if reason:
relocation.failure_reason = reason
relocation.status = Relocation.Status.FAILURE.value
relocation.save()
logger.info(
"Task failed", extra={"uuid": str(relocation.uuid), "task": task.name, "reason": reason}
)
send_relocation_update_email(
relocation,
Relocation.EmailKind.FAILED,
{
"uuid": str(relocation.uuid),
"reason": reason,
},
)
@contextmanager
def retry_task_or_fail_relocation(
relocation: Relocation, task: OrderedTask, attempts_left: int, reason: str = ""
) -> Generator[None]:
"""
Catches all exceptions, and does one of two things: calls into `fail_relocation` if there are no
retry attempts forthcoming, or simply bubbles them up (thereby triggering a celery retry) if
there are.
This function is ideal for transient failures, like networked service lag, where retrying at a
later time might yield a different result. For non-transient failures, use `fail_relocation`
instead.
"""
logger_data = {"uuid": str(relocation.uuid), "task": task.name, "attempts_left": attempts_left}
try:
yield
except Exception:
# If this is the last attempt, fail in the manner requested before reraising the exception.
# This ensures that the database entry for this `Relocation` correctly notes it as a
# `FAILURE`.
if attempts_left == 0:
fail_relocation(relocation, task, reason)
else:
logger_data["reason"] = reason
logger.info("Task retried", extra=logger_data)
raise
else:
logger.info("Task finished", extra=logger_data)
def make_cloudbuild_step_args(indent: int, args: list[str]) -> str:
return f"\n{' ' * indent}".join([f'- "{arg}"' for arg in args])
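# Illustrative result (reference only): make_cloudbuild_step_args(3, ["run", "--rm"]) produces
# the two YAML list lines '- "run"' and '- "--rm"', joined by a newline plus three spaces so
# that subsequent items line up when substituted into the indented templates above.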
# The set of arguments to invoke a "docker compose" in a cloudbuild step is tedious and repetitive -
# better to just handle it here.
@lru_cache(maxsize=1)
def get_docker_compose_cmd():
return make_cloudbuild_step_args(
3,
[
"compose",
"-f",
"/workspace/self-hosted/docker-compose.yml",
"-f",
"/workspace/self-hosted/docker-compose.override.yml",
],
)
# The set of arguments to invoke a "docker compose run" in a cloudbuild step is tedious and
# repetitive - better to just handle it here.
@lru_cache(maxsize=1)
def get_docker_compose_run():
return make_cloudbuild_step_args(
3,
[
"run",
"--rm",
"-T",
],
)
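# When both helpers are substituted into one of the validation step templates, the rendered args
# list begins with the "docker compose" invocation from `get_docker_compose_cmd()` followed
# immediately by the "run --rm -T" arguments from `get_docker_compose_run()`, roughly (illustrative):
#
#   args:
#     - "compose"
#     - "-f"
#     - "/workspace/self-hosted/docker-compose.yml"
#     - "-f"
#     - "/workspace/self-hosted/docker-compose.override.yml"
#     - "run"
#     - "--rm"
#     - "-T"
#     ...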
@lru_cache(maxsize=1)
def get_relocations_bucket_name():
"""
When using the local FileSystemStorage (ie, in tests), we use a contrived bucket name, since
this is really just an alias for a bespoke local directory in that case.
"""
storage = get_relocation_storage()
# Specialize for GCS...
if hasattr(storage, "bucket_name"):
return f"{storage.bucket_name}"
# ...and the local filesystem, when testing.
return "default"
def create_cloudbuild_yaml(relocation: Relocation) -> bytes:
bucket_root = f"gs://{get_relocations_bucket_name()}"
filter_org_slugs_args = ["--filter-org-slugs", ",".join(relocation.want_org_slugs)]
validation_steps = [
create_cloudbuild_validation_step(
id="import-baseline-config",
step=IMPORT_VALIDATION_STEP_TEMPLATE,
scope="config",
timeout=600,
wait_for=[],
kind=RelocationFile.Kind.BASELINE_CONFIG_VALIDATION_DATA,
args=["--overwrite-configs"],
),
create_cloudbuild_validation_step(
id="import-colliding-users",
step=IMPORT_VALIDATION_STEP_TEMPLATE,
scope="users",
timeout=900,
wait_for=["import-baseline-config"],
kind=RelocationFile.Kind.COLLIDING_USERS_VALIDATION_DATA,
args=["--filter-usernames-file", "/in/filter-usernames.txt"],
),
create_cloudbuild_validation_step(
id="import-raw-relocation-data",
step=IMPORT_VALIDATION_STEP_TEMPLATE,
scope="organizations",
timeout=2400,
wait_for=["import-colliding-users"],
kind=RelocationFile.Kind.RAW_USER_DATA,
args=filter_org_slugs_args,
),
create_cloudbuild_validation_step(
id="export-baseline-config",
step=EXPORT_VALIDATION_STEP_TEMPLATE,
scope="config",
timeout=600,
wait_for=["import-raw-relocation-data"],
kind=RelocationFile.Kind.BASELINE_CONFIG_VALIDATION_DATA,
args=[],
),
create_cloudbuild_validation_step(
id="export-colliding-users",
step=EXPORT_VALIDATION_STEP_TEMPLATE,
scope="users",
timeout=600,
wait_for=["export-baseline-config"],
kind=RelocationFile.Kind.COLLIDING_USERS_VALIDATION_DATA,
args=["--filter-usernames-file", "/in/filter-usernames.txt"],
),
COPY_OUT_DIR_TEMPLATE.substitute(
bucket_root=bucket_root,
uuid=relocation.uuid,
wait_for=["export-colliding-users"],
),
create_cloudbuild_validation_step(
id="compare-baseline-config",
step=COMPARE_VALIDATION_STEP_TEMPLATE,
scope="config",
timeout=150,
wait_for=["copy-out-dir"],
kind=RelocationFile.Kind.BASELINE_CONFIG_VALIDATION_DATA,
args=[],
),
create_cloudbuild_validation_step(
id="compare-colliding-users",
step=COMPARE_VALIDATION_STEP_TEMPLATE,
scope="users",
timeout=150,
wait_for=["compare-baseline-config"],
kind=RelocationFile.Kind.COLLIDING_USERS_VALIDATION_DATA,
args=[],
),
]
deps = dependencies()
truncate_tables = [
deps[get_model_name(m)].table_name
for m in sorted_dependencies()
if deps[get_model_name(m)].relocation_scope != RelocationScope.Excluded
]
return CLOUDBUILD_YAML_TEMPLATE.substitute(
docker_compose_cmd=get_docker_compose_cmd(),
bucket_root=bucket_root,
self_hosted_version="master",
truncate_tables=",".join(truncate_tables),
uuid=relocation.uuid,
validation_steps="".join(validation_steps),
).encode("utf-8")
def create_cloudbuild_validation_step(
id: str,
step: Template,
scope: str,
wait_for: list[str],
kind: RelocationFile.Kind,
timeout: int,
args: list[str],
) -> str:
return step.substitute(
args=make_cloudbuild_step_args(3, args),
docker_compose_cmd=get_docker_compose_cmd(),
docker_compose_run=get_docker_compose_run(),
jsonfile=kind.to_filename("json"),
kind=str(kind),
scope=scope,
tarfile=kind.to_filename("tar"),
timeout=str(timeout) + "s",
wait_for=make_cloudbuild_step_args(3, wait_for),
)
def uuid_to_identifier(uuid: UUID) -> int:
"""
Take a UUID object and generate a unique-enough 64-bit identifier from its final 64 bits
(with the top bit cleared, so the result fits in a signed 64-bit integer).
"""
return uuid.int & ((1 << 63) - 1)
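# Worked example (reference only): for the all-ones UUID, every bit kept by the mask survives:
#
#   uuid_to_identifier(UUID("ffffffff-ffff-ffff-ffff-ffffffffffff")) == (1 << 63) - 1
#
# i.e. the result is always non-negative and representable as a signed 64-bit integer.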