From d5f4a7470b1624864cdd5b5aff957502e0dc6f1e Mon Sep 17 00:00:00 2001
From: Jonathan Lebon
Date: Mon, 17 Jun 2019 17:25:48 -0400
Subject: [PATCH] pipeline: stop using /srv as workdir

I think we should stop using `/srv` as a workdir entirely and just
always build in the workspace. The core issue here is that (1) we want
to be able to have concurrent builds, and (2) a workdir can't be easily
shared today. This also greatly simplifies the devel vs. prod logic,
which had some funky conditionals around this.

So then, how can developers without S3 creds actually *access* built
artifacts? We simply archive them as part of the build. This is also in
line with #31, where we'll probably be archiving things anyway.

Finally, how *can* we use the PVC as a cache in a way that's safely
shareable across all the streams? I see two options offhand:

1. as a local RPM mirror: add flags to `cosa fetch` (and maybe
   `rpm-ostree`) to read & write RPMs in `/srv`, holding a lock while
   regenerating metadata (see the rough sketch after the diff)
2. as a pkgcache repo: similar to the above, but also doing the import,
   so it's just a pkgcache repo; this would probably require teaching
   rpm-ostree about this, or `cosa fetch` could just blindly import
   every ref
---
 Jenkinsfile  | 37 +++++--------------------------------
 utils.groovy |  5 -----
 2 files changed, 5 insertions(+), 37 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index cafd908d0..5ae8ac3be 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -54,15 +54,6 @@ if (prod) {
 podTemplate(cloud: 'openshift', label: 'coreos-assembler', yaml: pod, defaultContainer: 'jnlp') {
     node('coreos-assembler') { container('coreos-assembler') {
 
-        // Only use the PVC for prod caching. For devel pipelines, we just
-        // always refetch from scratch: we don't want to allocate cached data
-        // for pipelines which may only run once.
-        if (prod) {
-            utils.workdir = "/srv"
-        } else {
-            utils.workdir = env.WORKSPACE
-        }
-
         // this is defined IFF we *should* and we *can* upload to S3
         def s3_builddir
 
@@ -80,13 +71,6 @@ podTemplate(cloud: 'openshift', label: 'coreos-assembler', yaml: pod, defaultCon
             }
         }
 
-        // Special case for devel pipelines not running in our project and not
-        // uploading to S3; in that case, the only way to make the builds
-        // accessible at all is to have them in the PVC.
-        if (!prod && !prod_jenkins && !s3_builddir) {
-            utils.workdir = "/srv"
-        }
-
         stage('Init') {
 
             def ref = params.STREAM
@@ -162,22 +146,6 @@ podTemplate(cloud: 'openshift', label: 'coreos-assembler', yaml: pod, defaultCon
             """)
         }
 
-        stage('Prune Cache') {
-            utils.shwrap("""
-            coreos-assembler prune --keep=1
-            """)
-
-            // If the cache img is larger than e.g. 8G, then nuke it. Otherwise
-            // it'll just keep growing and we'll hit ENOSPC.
-            utils.shwrap("""
-            if [ \$(du cache/cache.qcow2 | cut -f1) -gt \$((1024*1024*8)) ]; then
-                rm -vf cache/cache.qcow2
-                qemu-img create -f qcow2 cache/cache.qcow2 10G
-                LIBGUESTFS_BACKEND=direct virt-format --filesystem=xfs -a cache/cache.qcow2
-            fi
-            """)
-        }
-
         stage('Archive') {
 
             // First, compress image artifacts
@@ -191,6 +159,11 @@ podTemplate(cloud: 'openshift', label: 'coreos-assembler', yaml: pod, defaultCon
             utils.shwrap("""
             coreos-assembler buildupload s3 --acl=public-read ${s3_builddir}
             """)
+        } else if (!prod) {
+            // In devel mode without an S3 server, just archive in Jenkins
+            // itself. Otherwise there'd be no other way to retrieve the
+            // artifacts.
+            archiveArtifacts('builds/latest/*')
         }
 
         // XXX: For now, we keep uploading the latest build to the artifact
diff --git a/utils.groovy b/utils.groovy
index 2b252f4ad..ba5e02566 100644
--- a/utils.groovy
+++ b/utils.groovy
@@ -1,9 +1,6 @@
-workdir = env.WORKSPACE
-
 def shwrap(cmds) {
     sh """
         set -xeuo pipefail
-        cd ${workdir}
         ${cmds}
     """
 }
@@ -11,7 +8,6 @@ def shwrap(cmds) {
 def shwrap_capture(cmds) {
     return sh(returnStdout: true, script: """
         set -euo pipefail
-        cd ${workdir}
         ${cmds}
     """).trim()
 }
@@ -19,7 +15,6 @@ def shwrap_capture(cmds) {
 def shwrap_rc(cmds) {
     return sh(returnStatus: true, script: """
         set -euo pipefail
-        cd ${workdir}
         ${cmds}
     """)
 }
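
For reference, a rough sketch of what option 1 from the message above
could look like in the pipeline. This is not part of the patch: the
`--rpm-cache` flag is hypothetical (it stands in for whatever flag we'd
end up adding to `cosa fetch`), and `/srv/rpm-mirror` is just an assumed
layout. The point is the flock(1) pattern for safely sharing the PVC
across concurrent stream builds:

    stage('Fetch') {
        utils.shwrap("""
        mkdir -p /srv/rpm-mirror
        # Hypothetical flag: --rpm-cache doesn't exist yet in cosa fetch.
        # flock(1) holds an exclusive lock on the shared mirror for the
        # duration of the fetch, so concurrent stream builds can't
        # regenerate repo metadata underneath each other.
        flock /srv/rpm-mirror/.lock coreos-assembler fetch --rpm-cache=/srv/rpm-mirror
        """)
    }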