Commit 0a31946
remove with-helm and associated functions
Liam Bennett authored and roboquat committed May 12, 2022
1 parent bbacba4 commit 0a31946
Showing 2 changed files with 2 additions and 246 deletions.
245 changes: 2 additions & 243 deletions .werft/jobs/build/deploy-to-preview-environment.ts
@@ -63,7 +63,6 @@ export async function deployToPreviewEnvironment(werft: Werft, jobConfig: JobCon
withPayment,
withObservability,
installEELicense,
withHelm,
workspaceFeatureFlags,
dynamicCPULimits,
storage
@@ -182,41 +181,8 @@ export async function deployToPreviewEnvironment(werft: Werft, jobConfig: JobCon
.finally(() => werft.done(sliceID));
}

werft.phase(phases.PREDEPLOY, "Checking for existing installations...");
// the context namespace is not set at this point
const deploymentKubeconfig = withVM ? PREVIEW_K3S_KUBECONFIG_PATH : CORE_DEV_KUBECONFIG_PATH;
const hasGitpodHelmInstall = exec(`helm --kubeconfig ${deploymentKubeconfig} status ${helmInstallName} -n ${deploymentConfig.namespace}`, { slice: "check for Helm install", dontCheckRc: true }).code === 0;
const hasGitpodInstallerInstall = exec(`kubectl --kubeconfig ${deploymentKubeconfig} get configmap gitpod-app -n ${deploymentConfig.namespace}`, { slice: "check for Installer install", dontCheckRc: true }).code === 0;
werft.log("result of installation checks", `has Helm install: ${hasGitpodHelmInstall}, has Installer install: ${hasGitpodInstallerInstall}`);

if (withHelm) {
werft.log("using Helm", "with-helm was specified.");
// you want helm, but left behind a Gitpod Installer installation, force a clean slate
if (hasGitpodInstallerInstall && !deploymentConfig.cleanSlateDeployment) {
werft.log("warning!", "with-helm was specified, there's an Installer install, but, `with-clean-slate-deployment=false`, forcing to true.");
deploymentConfig.cleanSlateDeployment = true;
}
werft.done(phases.PREDEPLOY);
werft.phase(phases.DEPLOY, "deploying")
await deployToDevWithHelm(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
} // scenario: you pushed code to an existing preview environment built with Helm, and didn't set with-clean-slate-deployment=true
else if (hasGitpodHelmInstall && !deploymentConfig.cleanSlateDeployment) {
werft.log("using Helm", "with-helm was not specified, but, a Helm installation exists, and this is not a clean slate deployment.");
werft.log("tip", "Set 'with-clean-slate-deployment=true' if you wish to remove the Helm install and use the Installer.");
werft.done(phases.PREDEPLOY);
werft.phase(phases.DEPLOY, "deploying to dev with Helm");
await deployToDevWithHelm(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
} else {
// you get here if
// ...it's a new install with no flag overrides or
// ...it's an existing install and a Helm install doesn't exist or
// ...you have a preexisting Helm install, set 'with-clean-slate-deployment=true', but did not specify 'with-helm=true'
// Why? The installer is supposed to be the default so we all dog-food it.
// But, it's new, so this may help folks transition with fewer issues.
werft.done(phases.PREDEPLOY);
werft.phase(phases.DEPLOY, "deploying to dev with Installer");
await deployToDevWithInstaller(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
}
werft.phase(phases.DEPLOY, "deploying to dev with Installer");
await deployToDevWithInstaller(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
}
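For readers skimming the large removal above: the PREDEPLOY phase used to probe the target namespace and choose between the Helm and Installer paths; after this commit the Installer path is taken unconditionally. A condensed sketch of the decision the removed code made, for reference only (all identifiers appear in the removed lines; this code does not remain in the job):

// Sketch of the removed decision tree (reference only; no longer present after this commit).
if (withHelm) {
    await deployToDevWithHelm(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
} else if (hasGitpodHelmInstall && !deploymentConfig.cleanSlateDeployment) {
    // A leftover Helm install without a clean-slate request also kept using Helm.
    await deployToDevWithHelm(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
} else {
    await deployToDevWithInstaller(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
}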

/*
@@ -357,213 +323,6 @@ async function deployToDevWithInstaller(werft: Werft, jobConfig: JobConfig, depl
}
}

/*
* Deploy a preview environment using Helm
*/
async function deployToDevWithHelm(werft: Werft, jobConfig: JobConfig, deploymentConfig: DeploymentConfig, workspaceFeatureFlags: string[], dynamicCPULimits, storage) {
const { version, destname, namespace, domain, monitoringDomain, url } = deploymentConfig;
// find free ports
werft.log("find free ports", "Check for some free ports.");
const [wsdaemonPortMeta, registryNodePortMeta, nodeExporterPort] = await findFreeHostPorts([
{ start: 10000, end: 11000 },
{ start: 30000, end: 31000 },
{ start: 31001, end: 32000 },
], CORE_DEV_KUBECONFIG_PATH, metaEnv({ slice: "find free ports", silent: true }));
werft.log("find free ports",
`wsdaemonPortMeta: ${wsdaemonPortMeta}, registryNodePortMeta: ${registryNodePortMeta}, and nodeExporterPort ${nodeExporterPort}.`);
werft.done("find free ports");

// trigger certificate issuing
werft.log('certificate', "organizing a certificate for the preview environment...");
let namespaceRecreatedResolve = undefined;
let namespaceRecreatedPromise = new Promise((resolve) => {
namespaceRecreatedResolve = resolve;
});

try {
if (deploymentConfig.cleanSlateDeployment) {
// re-create namespace
await cleanStateEnv(metaEnv());
} else {
createNamespace(namespace, CORE_DEV_KUBECONFIG_PATH, metaEnv({ slice: 'prep' }));
}
// Now we want to execute further kubectl operations only in the created namespace
setKubectlContextNamespace(namespace, metaEnv({ slice: 'prep' }));

// trigger certificate issuing
werft.log('certificate', "organizing a certificate for the preview environment...");
await installMetaCertificates(werft, jobConfig.repository.branch, jobConfig.withVM, namespace, CORE_DEV_KUBECONFIG_PATH, 'certificate');
werft.done('certificate');
await addDNSRecord(werft, deploymentConfig.namespace, deploymentConfig.domain, false, CORE_DEV_KUBECONFIG_PATH)
werft.done('prep');
} catch (err) {
werft.fail('prep', err);
}

// core-dev specific section start
werft.log("secret", "copy secret into namespace")
try {
const auth = exec(`printf "%s" "_json_key:$(kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret ${IMAGE_PULL_SECRET_NAME} --namespace=keys -o yaml \
| yq r - data['.dockerconfigjson'] \
| base64 -d)" | base64 -w 0`, { silent: true }).stdout.trim();
fs.writeFileSync("chart/gcp-sa-registry-auth",
`{
"auths": {
"eu.gcr.io": {
"auth": "${auth}"
},
"europe-docker.pkg.dev": {
"auth": "${auth}"
}
}
}` );
werft.done('secret');
} catch (err) {
werft.fail('secret', err);
}

werft.log("authProviders", "copy authProviders")
try {
exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret preview-envs-authproviders --namespace=keys -o yaml \
| yq r - data.authProviders \
| base64 -d -w 0 \
> authProviders`, { slice: "authProviders" });
exec(`yq merge --inplace .werft/jobs/build/helm/values.dev.yaml ./authProviders`, { slice: "authProviders" })
werft.done('authProviders');
} catch (err) {
werft.fail('authProviders', err);
}
// core-dev specific section end


// If observability is enabled, we want to deploy it before installing Gitpod itself.
// The reason behind it is because Gitpod components will start sending traces to a non-existent
// OpenTelemetry-collector otherwise.
werft.log(`observability`, "Running observability static checks.")
werft.log(`observability`, "Installing monitoring-satellite...")
if (deploymentConfig.withObservability) {
try {
const installMonitoringSatellite = new MonitoringSatelliteInstaller({
kubeconfigPath: CORE_DEV_KUBECONFIG_PATH,
branch: jobConfig.observability.branch,
satelliteNamespace: namespace,
clusterName: namespace,
nodeExporterPort: nodeExporterPort,
previewDomain: domain,
stackdriverServiceAccount: STACKDRIVER_SERVICEACCOUNT,
withVM: false,
werft: werft
});
await installMonitoringSatellite.install()
} catch (err) {
werft.fail('observability', err);
}
} else {
exec(`echo '"with-observability" annotation not set, skipping...'`, { slice: `observability` })
exec(`echo 'To deploy monitoring-satellite, please add "/werft with-observability" to your PR description.'`, { slice: `observability` })
}
werft.done('observability');

// deployment config
try {
shell.cd("/workspace/chart");
werft.log('helm', 'installing Gitpod');

const commonFlags = addDeploymentFlags();
installGitpod(commonFlags);

werft.log('helm', 'done');
werft.done('helm');
} catch (err) {
werft.fail('deploy', err);
} finally {
// produce the result independently of Helm succeeding, so that in case Helm fails we still have the URL.
exec(`werft log result -d "dev installation" -c github-check-preview-env url ${url}/workspaces`);
}

function installGitpod(commonFlags: string) {
let flags = commonFlags
flags += ` --set components.wsDaemon.servicePort=${wsdaemonPortMeta}`;
flags += ` --set components.registryFacade.ports.registry.servicePort=${registryNodePortMeta}`;

const nodeAffinityValues = getNodeAffinities();

if (storage === "gcp") {
exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret gcp-sa-gitpod-dev-deployer -n werft -o yaml | yq d - metadata | yq w - metadata.name remote-storage-gcloud | kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} apply -f -`);
flags += ` -f ../.werft/jobs/build/helm/values.dev.gcp-storage.yaml`;
}

/* A hash is calculated from the branch name and a subset of that string is parsed to a number x,
x mod the number of different nodepool-sets defined in the files listed in nodeAffinityValues
is used to generate a pseudo-random number that is consistent as long as the branch name persists.
We use it to reduce the number of preview-environments accumulating on a single nodepool.
*/
const nodepoolIndex = getNodePoolIndex(namespace);

exec(`helm dependencies up`);
exec(`/usr/local/bin/helm3 --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} upgrade --install --timeout 10m -f ../.werft/jobs/build/helm/${nodeAffinityValues[nodepoolIndex]} -f ../.werft/jobs/build/helm/values.dev.yaml ${flags} ${helmInstallName} .`);
}

function addDeploymentFlags() {
let flags = ""
flags += ` --namespace ${namespace}`;
flags += ` --set components.imageBuilder.hostDindData=/mnt/disks/raid0/docker-${namespace}`;
flags += ` --set components.wsDaemon.hostWorkspaceArea=/mnt/disks/raid0/workspaces-${namespace}`;
flags += ` --set version=${version}`;
flags += ` --set hostname=${domain}`;
flags += ` --set devBranch=${destname}`;
workspaceFeatureFlags.forEach((f, i) => {
flags += ` --set components.server.defaultFeatureFlags[${i}]='${f}'`;
});
if (dynamicCPULimits) {
flags += ` -f ../.werft/jobs/build/helm/values.variant.cpuLimits.yaml`;
}
if ((deploymentConfig.analytics || "").startsWith("segment|")) {
flags += ` --set analytics.writer=segment`;
flags += ` --set analytics.segmentKey=${deploymentConfig.analytics!.substring("segment|".length)}`;
} else if (!!deploymentConfig.analytics) {
flags += ` --set analytics.writer=${deploymentConfig.analytics!}`;
}
if (deploymentConfig.withObservability) {
flags += ` -f ../.werft/jobs/build/helm/values.tracing.yaml`;
}
werft.log("helm", "extracting versions");
try {
exec(`docker run --rm eu.gcr.io/gitpod-core-dev/build/versions:${version} cat /versions.yaml | tee versions.yaml`);
} catch (err) {
werft.fail('helm', err);
}
const pathToVersions = `${shell.pwd().toString()}/versions.yaml`;
flags += ` -f ${pathToVersions}`;

if (deploymentConfig.installEELicense) {
// We're adding the license rather late just to prevent accidentally printing it.
// If anyone got ahold of the license not much would be lost, but hey, no need to plaster it on the walls.
flags += ` --set license=${fs.readFileSync('/mnt/secrets/gpsh-coredev/license').toString()}`
}
if (deploymentConfig.withPayment) {
flags += ` -f ../.werft/jobs/build/helm/values.payment.yaml`;
exec(`cp /mnt/secrets/payment-provider-config/providerOptions payment-core-dev-options.json`);
flags += ` --set payment.chargebee.providerOptionsFile=payment-core-dev-options.json`;
exec(`cp /mnt/secrets/payment-webhook-config/license payment-core-dev-webhook.json`);
flags += ` --set components.paymentEndpoint.webhookFile="payment-core-dev-webhook.json"`;
}
return flags;
}

async function cleanStateEnv(shellOpts: ExecOptions) {
await wipeAndRecreateNamespace(helmInstallName, namespace, CORE_DEV_KUBECONFIG_PATH, { ...shellOpts, slice: 'prep' });
// cleanup non-namespace objects
werft.log("predeploy cleanup", "removing old unnamespaced objects - this might take a while");
try {
await deleteNonNamespaceObjects(namespace, destname, CORE_DEV_KUBECONFIG_PATH, { ...shellOpts, slice: 'predeploy cleanup' });
werft.done('predeploy cleanup');
} catch (err) {
werft.fail('predeploy cleanup', err);
}
}
}

/* A hash is calculated from the branch name and a subset of that string is parsed to a number x,
x mod the number of different nodepool-sets defined in the files listed in nodeAffinityValues
is used to generate a pseudo-random number that is consistent as long as the branch name persists.
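The comment above (its tail is collapsed in this diff view) describes how a preview environment gets pinned to a node pool: hash the namespace derived from the branch name, then take that number modulo the number of node-affinity value files. getNodePoolIndex itself is not shown in this diff, so the following is a minimal hypothetical sketch of that approach; the exact hash and slice used by the real implementation are assumptions, and nodeAffinityValues would come from getNodeAffinities() in the surrounding file:

import { createHash } from "crypto";

// Hypothetical sketch only; not the actual getNodePoolIndex from this repository.
function getNodePoolIndexSketch(namespace: string, nodeAffinityValues: string[]): number {
    // Hash the namespace (derived from the branch name) and parse a subset of the digest to a number x.
    const hash = createHash("sha256").update(namespace).digest("hex");
    const x = parseInt(hash.substring(0, 8), 16);
    // x mod the number of node-pool value files gives an index that stays stable
    // for as long as the branch name (and therefore the namespace) persists.
    return x % nodeAffinityValues.length;
}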
3 changes: 0 additions & 3 deletions .werft/jobs/build/job-config.ts
@@ -21,7 +21,6 @@ export interface JobConfig {
storage: string;
version: string;
withContrib: boolean
withHelm: boolean
withIntegrationTests: boolean;
withObservability: boolean
withPayment: boolean
@@ -87,7 +86,6 @@ export function jobConfig(werft: Werft, context: any): JobConfig {
const installEELicense = !("without-ee-license" in buildConfig) || mainBuild;
const withPayment = "with-payment" in buildConfig && !mainBuild;
const withObservability = "with-observability" in buildConfig && !mainBuild;
const withHelm = "with-helm" in buildConfig && !mainBuild;
const repository: Repository = {
owner: context.Repository.owner,
repo: context.Repository.repo,
@@ -134,7 +132,6 @@ export function jobConfig(werft: Werft, context: any): JobConfig {
storage,
version,
withContrib,
withHelm,
withIntegrationTests,
withObservability,
withPayment,
