From 456743ce01742051cbad99c307f0bebb9c08ecfd Mon Sep 17 00:00:00 2001 From: Lukas Krejci Date: Wed, 11 Aug 2021 14:07:44 +0200 Subject: [PATCH] feat: Import Devworkspace Che Operator (#925) * Import of DWCO into CO. Co-authored-by: Anatolii Bazko Co-authored-by: Michal Vala --- .ci/cico_updates_openshift.sh | 4 - .ci/oci-devworkspace-happy-path.sh | 1 - .ci/oci-multi-host.sh | 11 +- .ci/oci-single-host.sh | 4 - .github/bin/common.sh | 9 +- .github/bin/minikube/test-olm.sh | 4 + .../test-operator-singlehost-gateway.sh | 4 + .../test-operator-singlehost-native.sh | 4 + .github/workflows/release.yml | 6 +- Dockerfile | 6 - Makefile | 58 +- .../che-operator.clusterserviceversion.yaml | 409 +-- .../che-operator.clusterserviceversion.yaml | 432 +-- .../che-operator.clusterserviceversion.yaml | 10 - .../che-operator.clusterserviceversion.yaml | 10 - config/manager/manager.yaml | 55 +- config/rbac/cluster_role.yaml | 328 +- config/rbac/role.yaml | 59 +- .../devworkspace_flattened_theia-nodejs.yaml | 100 + config/samples/kustomization.yaml | 2 + controllers/che/checluster_controller.go | 9 +- .../checlusterbackup_controller.go | 17 +- .../checlusterrestore_controller.go | 17 +- controllers/devworkspace/README.adoc | 12 + controllers/devworkspace/controller.go | 459 +++ controllers/devworkspace/controller_test.go | 576 ++++ controllers/devworkspace/defaults/defaults.go | 107 + .../devworkspace/solver/che_routing.go | 519 +++ .../devworkspace/solver/che_routing_test.go | 622 ++++ controllers/devworkspace/solver/doc.go | 5 + .../devworkspace/solver/endpoint_exposer.go | 239 ++ controllers/devworkspace/solver/solver.go | 199 ++ .../devworkspace/solver/traefik_config.go | 39 + controllers/devworkspace/sync/sync.go | 208 ++ controllers/devworkspace/sync/sync_test.go | 195 ++ go.mod | 4 +- go.sum | 27 + main.go | 80 +- make-release.sh | 14 +- olm/buildDigestMap.sh | 4 - olm/images.sh | 4 - olm/release-olm-files.sh | 3 - pkg/deploy/defaults.go | 7 - 
pkg/deploy/dev-workspace/dev_workspace.go | 38 +- .../dev-workspace/dev_workspace_test.go | 10 +- pkg/util/k8s_helpers.go | 19 + vendor/github.com/devfile/api/v2/LICENSE | 277 ++ .../pkg/apis/workspaces/v1alpha2/commands.go | 182 ++ .../v1alpha2/component_container.go | 93 + .../v1alpha2/component_kubernetes_like.go | 45 + .../workspaces/v1alpha2/component_plugin.go | 7 + .../workspaces/v1alpha2/component_volume.go | 19 + .../apis/workspaces/v1alpha2/components.go | 104 + .../pkg/apis/workspaces/v1alpha2/devfile.go | 14 + .../v1alpha2/devworkspace_conversion.go | 4 + .../workspaces/v1alpha2/devworkspace_types.go | 97 + .../devworkspacetemplate_conversion.go | 4 + .../v1alpha2/devworkspacetemplate_spec.go | 79 + .../v1alpha2/devworkspacetemplate_types.go | 31 + .../v2/pkg/apis/workspaces/v1alpha2/doc.go | 6 + .../pkg/apis/workspaces/v1alpha2/endpoint.go | 115 + .../v2/pkg/apis/workspaces/v1alpha2/events.go | 26 + .../workspaces/v1alpha2/import_reference.go | 53 + .../v2/pkg/apis/workspaces/v1alpha2/keyed.go | 51 + .../v1alpha2/keyed_implementations.go | 41 + .../v1alpha2/override_directives.go | 58 + .../pkg/apis/workspaces/v1alpha2/overrides.go | 11 + .../v2/pkg/apis/workspaces/v1alpha2/parent.go | 6 + .../pkg/apis/workspaces/v1alpha2/projects.go | 128 + .../pkg/apis/workspaces/v1alpha2/register.go | 22 + .../v2/pkg/apis/workspaces/v1alpha2/union.go | 21 + .../v1alpha2/union_implementation.go | 103 + .../v1alpha2/zz_generated.deepcopy.go | 2855 +++++++++++++++++ .../zz_generated.keyed_definitions.go | 49 + .../v1alpha2/zz_generated.parent_overrides.go | 1143 +++++++ .../v1alpha2/zz_generated.plugin_overrides.go | 470 +++ ...rated.toplevellistcontainer_definitions.go | 33 + .../zz_generated.union_definitions.go | 360 +++ .../api/v2/pkg/attributes/attributes.go | 453 +++ .../devfile/api/v2/pkg/attributes/errors.go | 12 + .../devfile/api/v2/pkg/devfile/header.go | 84 + .../devfile/devworkspace-operator/LICENSE | 277 ++ .../apis/controller/v1alpha1/common.go | 59 + 
.../apis/controller/v1alpha1/devfile.go | 31 + .../v1alpha1/devworkspacerouting_types.go | 107 + .../apis/controller/v1alpha1/doc.go | 16 + .../controller/v1alpha1/groupversion_info.go | 32 + .../v1alpha1/zz_generated.deepcopy.go | 292 ++ .../devworkspacerouting_controller.go | 329 ++ .../devworkspacerouting/predicates.go | 69 + .../solvers/basic_solver.go | 81 + .../solvers/cluster_solver.go | 126 + .../devworkspacerouting/solvers/common.go | 247 ++ .../devworkspacerouting/solvers/errors.go | 48 + .../solvers/resolve_endpoints.go | 107 + .../devworkspacerouting/solvers/solver.go | 119 + .../devworkspacerouting/sync_ingresses.go | 111 + .../devworkspacerouting/sync_routes.go | 124 + .../devworkspacerouting/sync_services.go | 118 + .../internal/images/image.go | 152 + .../devworkspace-operator/internal/map/map.go | 35 + .../pkg/common/naming.go | 86 + .../pkg/config/cmd_terminal.go | 69 + .../pkg/config/config.go | 263 ++ .../devworkspace-operator/pkg/config/doc.go | 23 + .../devworkspace-operator/pkg/config/env.go | 70 + .../pkg/config/property.go | 45 + .../pkg/constants/attributes.go | 24 + .../pkg/constants/constants.go | 72 + .../pkg/constants/metadata.go | 84 + .../pkg/infrastructure/cluster.go | 108 + .../pkg/infrastructure/namespace.go | 50 + .../pkg/infrastructure/webhook.go | 52 + vendor/modules.txt | 22 +- 114 files changed, 14401 insertions(+), 1052 deletions(-) create mode 100644 config/samples/devworkspace_flattened_theia-nodejs.yaml create mode 100644 controllers/devworkspace/README.adoc create mode 100644 controllers/devworkspace/controller.go create mode 100644 controllers/devworkspace/controller_test.go create mode 100644 controllers/devworkspace/defaults/defaults.go create mode 100644 controllers/devworkspace/solver/che_routing.go create mode 100644 controllers/devworkspace/solver/che_routing_test.go create mode 100644 controllers/devworkspace/solver/doc.go create mode 100644 controllers/devworkspace/solver/endpoint_exposer.go create mode 100644 
controllers/devworkspace/solver/solver.go create mode 100644 controllers/devworkspace/solver/traefik_config.go create mode 100644 controllers/devworkspace/sync/sync.go create mode 100644 controllers/devworkspace/sync/sync_test.go create mode 100644 vendor/github.com/devfile/api/v2/LICENSE create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_conversion.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_conversion.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_types.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed.go 
create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed_implementations.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrides.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/parent.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/register.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union_implementation.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.keyed_definitions.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.toplevellistcontainer_definitions.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/attributes/errors.go create mode 100644 vendor/github.com/devfile/api/v2/pkg/devfile/header.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/LICENSE create mode 100644 vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/common.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devfile.go create mode 
100644 vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devworkspacerouting_types.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/doc.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/groupversion_info.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/devworkspacerouting_controller.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/predicates.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/basic_solver.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/cluster_solver.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/common.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/errors.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/resolve_endpoints.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/solver.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_ingresses.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_routes.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_services.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/internal/images/image.go create mode 100644 
vendor/github.com/devfile/devworkspace-operator/internal/map/map.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/common/naming.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/config/cmd_terminal.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/config/config.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/config/doc.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/config/env.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/config/property.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/constants/attributes.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/constants/constants.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/constants/metadata.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/cluster.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/namespace.go create mode 100644 vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/webhook.go diff --git a/.ci/cico_updates_openshift.sh b/.ci/cico_updates_openshift.sh index 1b2aadafb..7aeae46b5 100755 --- a/.ci/cico_updates_openshift.sh +++ b/.ci/cico_updates_openshift.sh @@ -39,10 +39,6 @@ runTests() { sleep 10s createWorkspaceDevWorkspaceController waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} - - sleep 10s - createWorkspaceDevWorkspaceCheOperator - waitAllPodsRunning ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE} } initDefaults diff --git a/.ci/oci-devworkspace-happy-path.sh b/.ci/oci-devworkspace-happy-path.sh index 451bb3636..22d06685f 100755 --- a/.ci/oci-devworkspace-happy-path.sh +++ b/.ci/oci-devworkspace-happy-path.sh @@ -48,7 +48,6 @@ function bumpPodsInfo() { function Catch_Finish() { # grab devworkspace-controller namespace events after running e2e bumpPodsInfo 
"devworkspace-controller" - bumpPodsInfo "devworkspace-che" bumpPodsInfo "admin-che" oc get devworkspaces -n "admin-che" -o=yaml > $ARTIFACTS_DIR/devworkspaces.yaml diff --git a/.ci/oci-multi-host.sh b/.ci/oci-multi-host.sh index 9bd976a17..328b7d170 100755 --- a/.ci/oci-multi-host.sh +++ b/.ci/oci-multi-host.sh @@ -48,14 +48,9 @@ runTests() { enableDevWorkspaceEngine waitDevWorkspaceControllerStarted - sleep 10s - createWorkspaceDevWorkspaceController - waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} - - sleep 10s - createWorkspaceDevWorkspaceCheOperator - waitAllPodsRunning ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE} - + sleep 10s + createWorkspaceDevWorkspaceController + waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} } initDefaults diff --git a/.ci/oci-single-host.sh b/.ci/oci-single-host.sh index ec0caaa3e..6f23039bf 100755 --- a/.ci/oci-single-host.sh +++ b/.ci/oci-single-host.sh @@ -51,10 +51,6 @@ runTests() { sleep 10s createWorkspaceDevWorkspaceController waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} - - sleep 10s - createWorkspaceDevWorkspaceCheOperator - waitAllPodsRunning ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE} } initDefaults diff --git a/.github/bin/common.sh b/.github/bin/common.sh index 0ffc18696..09e2766ee 100755 --- a/.github/bin/common.sh +++ b/.github/bin/common.sh @@ -487,7 +487,6 @@ waitDevWorkspaceControllerStarted() { OPERATOR_POD=$(oc get pods -o json -n ${NAMESPACE} | jq -r '.items[] | select(.metadata.name | test("che-operator-")).metadata.name') oc logs ${OPERATOR_POD} -c che-operator -n ${NAMESPACE} - oc logs ${OPERATOR_POD} -c devworkspace-che-operator -n ${NAMESPACE} exit 1 } @@ -500,7 +499,7 @@ createWorkspaceDevWorkspaceController () { CURRENT_TIME=$(date +%s) ENDTIME=$(($CURRENT_TIME + 180)) while [ $(date +%s) -lt $ENDTIME ]; do - if oc apply -f https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/samples/flattened_theia-nodejs.yaml -n 
${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}; then + if oc apply -f ${OPERATOR_REPO}/config/samples/devworkspace_flattened_theia-nodejs.yaml -n ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE}; then break fi sleep 10 @@ -528,12 +527,6 @@ waitAllPodsRunning() { exit 1 } -createWorkspaceDevWorkspaceCheOperator() { - oc create namespace ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE} - sleep 10s - oc apply -f https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/samples/flattened_theia-nodejs.yaml -n ${DEVWORKSPACE_CHE_OPERATOR_TEST_NAMESPACE} -} - enableDevWorkspaceEngine() { kubectl patch checluster/eclipse-che -n ${NAMESPACE} --type=merge -p "{\"spec\":{\"server\":{\"customCheProperties\": {\"CHE_INFRA_KUBERNETES_ENABLE__UNSUPPORTED__K8S\": \"true\"}}}}" kubectl patch checluster/eclipse-che -n ${NAMESPACE} --type=merge -p '{"spec":{"devWorkspace":{"enable": true}}}' diff --git a/.github/bin/minikube/test-olm.sh b/.github/bin/minikube/test-olm.sh index 13fc8de91..a8006fd6c 100755 --- a/.github/bin/minikube/test-olm.sh +++ b/.github/bin/minikube/test-olm.sh @@ -39,6 +39,10 @@ runTest() { # Dev Workspace controller tests enableDevWorkspaceEngine waitDevWorkspaceControllerStarted + + sleep 10s + createWorkspaceDevWorkspaceController + waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} } initDefaults diff --git a/.github/bin/minikube/test-operator-singlehost-gateway.sh b/.github/bin/minikube/test-operator-singlehost-gateway.sh index 0753d6bae..15801848d 100755 --- a/.github/bin/minikube/test-operator-singlehost-gateway.sh +++ b/.github/bin/minikube/test-operator-singlehost-gateway.sh @@ -48,6 +48,10 @@ runTest() { # Dev Workspace controller tests enableDevWorkspaceEngine waitDevWorkspaceControllerStarted + + sleep 10s + createWorkspaceDevWorkspaceController + waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} } initDefaults diff --git a/.github/bin/minikube/test-operator-singlehost-native.sh 
b/.github/bin/minikube/test-operator-singlehost-native.sh index 511dea110..f39bba09e 100755 --- a/.github/bin/minikube/test-operator-singlehost-native.sh +++ b/.github/bin/minikube/test-operator-singlehost-native.sh @@ -47,6 +47,10 @@ runTest() { # Dev Workspace controller tests enableDevWorkspaceEngine waitDevWorkspaceControllerStarted + + sleep 10s + createWorkspaceDevWorkspaceController + waitAllPodsRunning ${DEVWORKSPACE_CONTROLLER_TEST_NAMESPACE} } initDefaults diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 56ef5e94e..a54617e50 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -73,8 +73,6 @@ jobs: CHE_VERSION=${{ github.event.inputs.version }} DWO_VERSION=${{ github.event.inputs.dwoVersion }} if [[ ${DWO_VERSION} != "v"* ]]; then DWO_VERSION="v${DWO_VERSION}"; fi - DWO_CHE_VERSION=${{ github.event.inputs.dwoCheVersion }} - if [[ ${DWO_CHE_VERSION} != "v"* ]]; then DWO_CHE_VERSION="v${DWO_CHE_VERSION}"; fi echo "CHE_VERSION=${CHE_VERSION}" BRANCH=${CHE_VERSION%.*}.x echo "BRANCH=${BRANCH}" @@ -93,10 +91,10 @@ jobs: export QUAY_ECLIPSE_CHE_PASSWORD=${{ secrets.QUAY_PASSWORD }} if [[ ${CHE_VERSION} == *".0" ]]; then - ./make-release.sh ${CHE_VERSION} --release --check-resources --release-olm-files --dev-workspace-controller-version ${DWO_VERSION} --dev-workspace-che-operator-version ${DWO_CHE_VERSION} + ./make-release.sh ${CHE_VERSION} --release --check-resources --release-olm-files --dev-workspace-controller-version ${DWO_VERSION} else git checkout ${BRANCH} - ./make-release.sh ${CHE_VERSION} --release --release-olm-files --dev-workspace-controller-version ${DWO_VERSION} --dev-workspace-che-operator-version ${DWO_CHE_VERSION} + ./make-release.sh ${CHE_VERSION} --release --release-olm-files --dev-workspace-controller-version ${DWO_VERSION} fi # default robot account on quay does not have permissions for application repos diff --git a/Dockerfile b/Dockerfile index 02c0bb31c..accc840ca 100644 --- 
a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,6 @@ FROM registry.access.redhat.com/ubi8/go-toolset:1.15.13-4 as builder ENV GOPATH=/go/ ENV RESTIC_TAG=v0.12.0 ARG DEV_WORKSPACE_CONTROLLER_VERSION="main" -ARG DEV_WORKSPACE_CHE_OPERATOR_VERSION="main" ARG DEV_HEADER_REWRITE_TRAEFIK_PLUGIN="main" USER root @@ -52,10 +51,6 @@ RUN unzip /tmp/asset-devworkspace-operator.zip */deploy/deployment/* -d /tmp && mkdir -p /tmp/devworkspace-operator/templates/ && \ mv /tmp/devfile-devworkspace-operator-*/deploy /tmp/devworkspace-operator/templates/ -RUN unzip /tmp/asset-devworkspace-che-operator.zip */deploy/deployment/* -d /tmp && \ - mkdir -p /tmp/devworkspace-che-operator/templates/ && \ - mv /tmp/che-incubator-devworkspace-che-operator-*/deploy /tmp/devworkspace-che-operator/templates/ - RUN unzip /tmp/asset-header-rewrite-traefik-plugin.zip -d /tmp && \ mkdir -p /tmp/header-rewrite-traefik-plugin && \ mv /tmp/*-header-rewrite-traefik-plugin-*/headerRewrite.go /tmp/*-header-rewrite-traefik-plugin-*/.traefik.yml /tmp/header-rewrite-traefik-plugin @@ -71,7 +66,6 @@ FROM registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526 COPY --from=builder /che-operator/che-operator /manager COPY --from=builder /che-operator/templates/*.sh /tmp/ COPY --from=builder /tmp/devworkspace-operator/templates/deploy /tmp/devworkspace-operator/templates -COPY --from=builder /tmp/devworkspace-che-operator/templates/deploy /tmp/devworkspace-che-operator/templates COPY --from=builder /tmp/header-rewrite-traefik-plugin /tmp/header-rewrite-traefik-plugin COPY --from=builder /tmp/restic/restic /usr/local/bin/restic COPY --from=builder /go/restic/LICENSE /usr/local/bin/restic-LICENSE.txt diff --git a/Makefile b/Makefile index 5259ff73a..86ee77136 100644 --- a/Makefile +++ b/Makefile @@ -331,19 +331,6 @@ prepare-templates: cp -rf /tmp/devfile-devworkspace-operator*/deploy/* /tmp/devworkspace-operator/templates echo "[INFO] Downloading Dev Workspace operator templates completed." 
- # Download Dev Workspace Che operator templates - echo "[INFO] Downloading Dev Workspace Che operator templates ..." - rm -f /tmp/devworkspace-che-operator.zip - rm -rf /tmp/che-incubator-devworkspace-che-operator-* - rm -rf /tmp/devworkspace-che-operator/ - mkdir -p /tmp/devworkspace-che-operator/templates - - curl -sL https://api.github.com/repos/che-incubator/devworkspace-che-operator/zipball/${DEV_WORKSPACE_CHE_OPERATOR_VERSION} > /tmp/devworkspace-che-operator.zip - - unzip -q /tmp/devworkspace-che-operator.zip '*/deploy/deployment/*' -d /tmp - cp -r /tmp/che-incubator-devworkspace-che-operator*/deploy/* /tmp/devworkspace-che-operator/templates - echo "[INFO] Downloading Dev Workspace operator templates completed." - create-namespace: set +e kubectl create namespace ${ECLIPSE_CHE_NAMESPACE} || true @@ -432,7 +419,7 @@ rm -rf $$TMP_DIR ;\ endef update-roles: - echo "[INFO] Updating roles with DW and DWCO roles" + echo "[INFO] Updating roles with DW roles" CLUSTER_ROLES=( https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-view-workspaces.ClusterRole.yaml @@ -441,11 +428,9 @@ update-roles: https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-proxy-role.ClusterRole.yaml https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml https://raw.githubusercontent.com/devfile/devworkspace-operator/main/deploy/deployment/openshift/objects/devworkspace-controller-view-workspaces.ClusterRole.yaml - https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-role.ClusterRole.yaml - https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-metrics-reader.ClusterRole.yaml ) - # Updates 
cluster_role.yaml based on DW and DWCO roles + # Updates cluster_role.yaml based on DW roles ## Removes old cluster roles cat config/rbac/cluster_role.yaml | sed '/CHE-OPERATOR ROLES ONLY: END/q0' > config/rbac/cluster_role.yaml.tmp mv config/rbac/cluster_role.yaml.tmp config/rbac/cluster_role.yaml @@ -461,7 +446,7 @@ update-roles: done ROLES=( - https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-leader-election-role.Role.yaml + # currently, there are no other roles we need to incorporate ) # Updates role.yaml @@ -669,8 +654,6 @@ bundle: generate manifests kustomize ## Generate bundle manifests and metadata, if [ "$${platform}" = "openshift" ]; then yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[0].securityContext."allowPrivilegeEscalation") = false' "$${NEW_CSV}" yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[0].securityContext."runAsNonRoot") = true' "$${NEW_CSV}" - yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[1].securityContext."allowPrivilegeEscalation") = false' "$${NEW_CSV}" - yq -riSY '(.spec.install.spec.deployments[0].spec.template.spec.containers[1].securityContext."runAsNonRoot") = true' "$${NEW_CSV}" fi # Format code. 
@@ -809,39 +792,6 @@ update-deployment-yaml-images: yq -riY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_che_server_secure_exposer_jwt_proxy_image\") | .value ) = \"$(JWT_PROXY_IMAGE)\"" $(OPERATOR_YAML) $(MAKE) ensure-license-header FILE="config/manager/manager.yaml" -update-devworkspace-container: - echo "[INFO] Update devworkspace container in the che-operator deployment" - # Deletes old DWCO container - yq -riY "del(.spec.template.spec.containers[1])" $(OPERATOR_YAML) - yq -riY ".spec.template.spec.containers[1].name = \"devworkspace-container\"" $(OPERATOR_YAML) - - # Extract DWCO container spec from deployment - DWCO_CONTAINER=$$(curl -sL https://raw.githubusercontent.com/che-incubator/devworkspace-che-operator/main/deploy/deployment/openshift/objects/devworkspace-che-manager.Deployment.yaml \ - | sed '1,/containers:/d' \ - | sed -n '/serviceAccountName:/q;p' \ - | sed -e 's/^/ /') - echo "$${DWCO_CONTAINER}" > dwcontainer - - # Add DWCO container to manager.yaml - sed -i -e '/- name: devworkspace-container/{r dwcontainer' -e 'd}' $(OPERATOR_YAML) - rm dwcontainer - - # update securityContext - yq -riY ".spec.template.spec.containers[1].securityContext.privileged = false" $(OPERATOR_YAML) - yq -riY ".spec.template.spec.containers[1].securityContext.readOnlyRootFilesystem = false" $(OPERATOR_YAML) - yq -riY ".spec.template.spec.containers[1].securityContext.capabilities.drop[0] = \"ALL\"" $(OPERATOR_YAML) - - # update env variable - yq -riY "del( .spec.template.spec.containers[1].env[] | select(.name == \"CONTROLLER_SERVICE_ACCOUNT_NAME\") | .valueFrom)" $(OPERATOR_YAML) - yq -riY "( .spec.template.spec.containers[1].env[] | select(.name == \"CONTROLLER_SERVICE_ACCOUNT_NAME\") | .value) = \"che-operator\"" $(OPERATOR_YAML) - yq -riY "del( .spec.template.spec.containers[1].env[] | select(.name == \"WATCH_NAMESPACE\") | .value)" $(OPERATOR_YAML) - yq -riY "( 
.spec.template.spec.containers[1].env[] | select(.name == \"WATCH_NAMESPACE\") | .valueFrom.fieldRef.fieldPath) = \"metadata.namespace\"" $(OPERATOR_YAML) - - yq -riY ".spec.template.spec.containers[1].args[1] = \"--metrics-addr\"" $(OPERATOR_YAML) - yq -riY ".spec.template.spec.containers[1].args[2] = \"0\"" $(OPERATOR_YAML) - - # $(MAKE) ensureLicense $(OPERATOR_YAML) - update-dockerfile-image: if [ -z $(UBI8_MINIMAL_IMAGE) ]; then echo "[ERROR] Define `UBI8_MINIMAL_IMAGE` argument" @@ -878,8 +828,6 @@ update-resource-images: # Update che-operator Dockerfile $(MAKE) update-dockerfile-image UBI8_MINIMAL_IMAGE="$${UBI8_MINIMAL_IMAGE}" - $(MAKE) update-devworkspace-container - .PHONY: bundle-build bundle-build: ## Build the bundle image. if [ -z "$(platform)" ]; then diff --git a/bundle/next/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml b/bundle/next/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml index c81684219..856ab7d5c 100644 --- a/bundle/next/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/next/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml @@ -83,7 +83,7 @@ metadata: operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/eclipse-che/che-operator support: Eclipse Foundation - name: eclipse-che-preview-kubernetes.v7.35.0-276.next + name: eclipse-che-preview-kubernetes.v7.35.0-279.next namespace: placeholder spec: apiservicedefinitions: {} @@ -294,8 +294,9 @@ spec: - oauthclients verbs: - create - - get - delete + - deletecollection + - get - list - patch - update @@ -304,16 +305,6 @@ spec: - rbac.authorization.k8s.io resources: - clusterrolebindings - verbs: - - list - - create - - watch - - update - - get - - delete - - apiGroups: - - rbac.authorization.k8s.io - resources: - clusterroles verbs: - list @@ -326,14 +317,6 @@ spec: - rbac.authorization.k8s.io resources: - 
roles - verbs: - - get - - create - - update - - delete - - apiGroups: - - rbac.authorization.k8s.io - resources: - rolebindings verbs: - get @@ -347,6 +330,14 @@ spec: - checlusters/status - checlusters/finalizers - checlusters/status + - checlusterbackups + - checlusterbackups/status + - checlusterbackups/finalizers + - checlusterrestores + - checlusterrestores/status + - backupserverconfigurations + - backupserverconfigurations/status + - chebackupserverconfigurations verbs: - '*' - apiGroups: @@ -358,14 +349,6 @@ spec: - list - create - update - - apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - get - - create - - watch - apiGroups: - "" resources: @@ -411,40 +394,24 @@ spec: - delete - apiGroups: - apps + - extensions resources: - deployments + - replicasets verbs: - - get - - list - - create - - patch - - watch - - delete - - apiGroups: - - "" - resources: - - services - verbs: - - list - - create - - delete + - '*' - apiGroups: - - "" + - route.openshift.io resources: - - configmaps + - routes verbs: - - get - - create - - delete - - list + - '*' - apiGroups: - route.openshift.io resources: - - routes + - routes/custom-host verbs: - - list - create - - delete - apiGroups: - "" resources: @@ -465,11 +432,7 @@ spec: resources: - ingresses verbs: - - list - - create - - watch - - get - - delete + - '*' - apiGroups: - networking.k8s.io resources: @@ -494,6 +457,14 @@ spec: - subscriptions verbs: - get + - apiGroups: + - operators.coreos.com + resources: + - clusterserviceversions + verbs: + - list + - get + - watch - apiGroups: - metrics.k8s.io resources: @@ -513,6 +484,72 @@ spec: - get - list - update + - apiGroups: + - "" + resources: + - configmaps + - persistentvolumeclaims + - pods + - secrets + - serviceaccounts + - services + verbs: + - '*' + - apiGroups: + - apps + resourceNames: + - che-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - 
get + - update + - watch + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings + verbs: + - '*' + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/finalizers + verbs: + - update + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/status + verbs: + - get + - patch + - update + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get + - nonResourceURLs: + - /metrics + verbs: + - get + - apiGroups: + - che.eclipse.org + resources: + - kubernetesimagepullers + verbs: + - '*' - apiGroups: - workspace.devfile.io resources: @@ -776,173 +813,6 @@ spec: - get - list - watch - - apiGroups: - - "" - resources: - - configmaps - - persistentvolumeclaims - - pods - - secrets - - serviceaccounts - verbs: - - '*' - - apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - apiGroups: - - "" - resources: - - services - verbs: - - '*' - - apiGroups: - - apps - resourceNames: - - devworkspace-che-operator - resources: - - deployments/finalizers - verbs: - - update - - apiGroups: - - apps - - extensions - resources: - - deployments - verbs: - - get - - list - - watch - - apiGroups: - - apps - - extensions - resources: - - deployments - - replicasets - verbs: - - '*' - - apiGroups: - - apps - - extensions - resources: - - replicasets - verbs: - - get - - list - - watch - - apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - update - - watch - - apiGroups: - - org.eclipse.che - resources: - - checlusters - - checlusters/status - - checlusters/finalizers - verbs: - - '*' - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings - verbs: - - '*' - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings/finalizers - verbs: - - update - - apiGroups: - - controller.devfile.io - resources: - - 
devworkspaceroutings/status - verbs: - - get - - patch - - update - - apiGroups: - - "" - resources: - - configmap - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - extensions - resources: - - ingresses - verbs: - - '*' - - apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - create - - get - - apiGroups: - - oauth.openshift.io - resources: - - oauthclients - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - - clusterroles - - rolebindings - - roles - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - route.openshift.io - resources: - - routes - verbs: - - '*' - - apiGroups: - - route.openshift.io - resources: - - routes/custom-host - verbs: - - create - - nonResourceURLs: - - /metrics - verbs: - - get serviceAccountName: che-operator deployments: - name: che-operator @@ -991,7 +861,7 @@ spec: - name: RELATED_IMAGE_che_tls_secrets_creation_job value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad - name: RELATED_IMAGE_pvc_jobs - value: registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526 + value: registry.access.redhat.com/ubi8-minimal:8.4-208 - name: RELATED_IMAGE_postgres value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392 - name: RELATED_IMAGE_keycloak @@ -1038,6 +908,8 @@ spec: value: che-postgres-secret - name: CHE_SERVER_TRUST_STORE_CONFIGMAP_NAME value: ca-certs + - name: MAX_CONCURRENT_RECONCILES + value: "1" image: quay.io/eclipse/che-operator:next imagePullPolicy: Always livenessProbe: @@ -1075,47 +947,6 @@ spec: - ALL privileged: false readOnlyRootFilesystem: false - - args: - - --enable-leader-election - - --metrics-addr - - "0" - command: - - /usr/local/bin/devworkspace-che-operator - env: - - name: WATCH_NAMESPACE - valueFrom: - 
fieldRef: - fieldPath: metadata.annotations['olm.targetNamespaces'] - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: devworkspace-che-operator - - name: MAX_CONCURRENT_RECONCILES - value: "1" - - name: CONTROLLER_SERVICE_ACCOUNT_NAME - value: che-operator - - name: RELATED_IMAGE_gateway - value: quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23 - - name: RELATED_IMAGE_gateway_configurer - value: quay.io/che-incubator/configbump:0.1.4 - image: quay.io/che-incubator/devworkspace-che-operator:ci - imagePullPolicy: Always - name: devworkspace-che-operator - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 32Mi - securityContext: - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: false hostIPC: false hostNetwork: false hostPID: false @@ -1179,8 +1010,10 @@ spec: - get - apiGroups: - apps + - extensions resources: - deployments + - replicasets verbs: - '*' - apiGroups: @@ -1215,12 +1048,6 @@ spec: - get - list - watch - - apiGroups: - - che.eclipse.org - resources: - - kubernetesimagepullers - verbs: - - '*' - apiGroups: - operators.coreos.com resources: @@ -1239,29 +1066,45 @@ spec: - apiGroups: - "" resources: - - configmaps + - configmaps/status verbs: - get - - list - - watch - - create - update - patch - - delete - apiGroups: - "" resources: - - configmaps/status + - events + verbs: + - create + - apiGroups: + - apps + resourceNames: + - che-operator + resources: + - deployments/finalizers verbs: - - get - update - - patch - apiGroups: - - "" + - controller.devfile.io resources: - - events + - devworkspaceroutings verbs: - - create + - '*' + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/finalizers + verbs: + - update + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/status + verbs: + - get + - patch + - update - apiGroups: - "" resources: @@ 
-1324,4 +1167,4 @@ spec: maturity: stable provider: name: Eclipse Foundation - version: 7.35.0-276.next + version: 7.35.0-279.next diff --git a/bundle/next/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml b/bundle/next/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml index d5b3ca5a3..be2edadf4 100644 --- a/bundle/next/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/next/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml @@ -76,7 +76,7 @@ metadata: operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/eclipse-che/che-operator support: Eclipse Foundation - name: eclipse-che-preview-openshift.v7.35.0-276.next + name: eclipse-che-preview-openshift.v7.35.0-279.next namespace: placeholder spec: apiservicedefinitions: {} @@ -287,8 +287,9 @@ spec: - oauthclients verbs: - create - - get - delete + - deletecollection + - get - list - patch - update @@ -339,16 +340,6 @@ spec: - rbac.authorization.k8s.io resources: - clusterrolebindings - verbs: - - list - - create - - watch - - update - - get - - delete - - apiGroups: - - rbac.authorization.k8s.io - resources: - clusterroles verbs: - list @@ -358,17 +349,9 @@ spec: - get - delete - apiGroups: - - authorization.openshift.io + - rbac.authorization.k8s.io resources: - roles - verbs: - - get - - create - - update - - delete - - apiGroups: - - authorization.openshift.io - resources: - rolebindings verbs: - get @@ -376,17 +359,9 @@ spec: - update - delete - apiGroups: - - rbac.authorization.k8s.io + - authorization.openshift.io resources: - roles - verbs: - - get - - create - - update - - delete - - apiGroups: - - rbac.authorization.k8s.io - resources: - rolebindings verbs: - get @@ -400,6 +375,14 @@ spec: - checlusters/status - checlusters/finalizers - checlusters/status + - checlusterbackups + - checlusterbackups/status + - checlusterbackups/finalizers 
+ - checlusterrestores + - checlusterrestores/status + - backupserverconfigurations + - backupserverconfigurations/status + - chebackupserverconfigurations verbs: - '*' - apiGroups: @@ -425,14 +408,6 @@ spec: - list - create - update - - apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - get - - create - - watch - apiGroups: - "" resources: @@ -478,40 +453,24 @@ spec: - delete - apiGroups: - apps + - extensions resources: - deployments + - replicasets verbs: - - get - - list - - create - - patch - - watch - - delete - - apiGroups: - - "" - resources: - - services - verbs: - - list - - create - - delete + - '*' - apiGroups: - - "" + - route.openshift.io resources: - - configmaps + - routes verbs: - - get - - create - - delete - - list + - '*' - apiGroups: - route.openshift.io resources: - - routes + - routes/custom-host verbs: - - list - create - - delete - apiGroups: - "" resources: @@ -532,11 +491,7 @@ spec: resources: - ingresses verbs: - - list - - create - - watch - - get - - delete + - '*' - apiGroups: - networking.k8s.io resources: @@ -561,6 +516,14 @@ spec: - subscriptions verbs: - get + - apiGroups: + - operators.coreos.com + resources: + - clusterserviceversions + verbs: + - list + - get + - watch - apiGroups: - metrics.k8s.io resources: @@ -570,6 +533,72 @@ spec: - get - list - watch + - apiGroups: + - "" + resources: + - configmaps + - persistentvolumeclaims + - pods + - secrets + - serviceaccounts + - services + verbs: + - '*' + - apiGroups: + - apps + resourceNames: + - che-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - update + - watch + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings + verbs: + - '*' + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/finalizers + verbs: + - update + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/status + 
verbs: + - get + - patch + - update + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get + - nonResourceURLs: + - /metrics + verbs: + - get + - apiGroups: + - che.eclipse.org + resources: + - kubernetesimagepullers + verbs: + - '*' - apiGroups: - workspace.devfile.io resources: @@ -833,173 +862,6 @@ spec: - get - list - watch - - apiGroups: - - "" - resources: - - configmaps - - persistentvolumeclaims - - pods - - secrets - - serviceaccounts - verbs: - - '*' - - apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - apiGroups: - - "" - resources: - - services - verbs: - - '*' - - apiGroups: - - apps - resourceNames: - - devworkspace-che-operator - resources: - - deployments/finalizers - verbs: - - update - - apiGroups: - - apps - - extensions - resources: - - deployments - verbs: - - get - - list - - watch - - apiGroups: - - apps - - extensions - resources: - - deployments - - replicasets - verbs: - - '*' - - apiGroups: - - apps - - extensions - resources: - - replicasets - verbs: - - get - - list - - watch - - apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - update - - watch - - apiGroups: - - org.eclipse.che - resources: - - checlusters - - checlusters/status - - checlusters/finalizers - verbs: - - '*' - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings - verbs: - - '*' - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings/finalizers - verbs: - - update - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings/status - verbs: - - get - - patch - - update - - apiGroups: - - "" - resources: - - configmap - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - extensions - resources: - - ingresses - verbs: - - '*' - - apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - 
verbs: - - create - - get - - apiGroups: - - oauth.openshift.io - resources: - - oauthclients - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - - clusterroles - - rolebindings - - roles - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - route.openshift.io - resources: - - routes - verbs: - - '*' - - apiGroups: - - route.openshift.io - resources: - - routes/custom-host - verbs: - - create - - nonResourceURLs: - - /metrics - verbs: - - get serviceAccountName: che-operator deployments: - name: che-operator @@ -1046,7 +908,7 @@ spec: - name: RELATED_IMAGE_devfile_registry value: quay.io/eclipse/che-devfile-registry:next - name: RELATED_IMAGE_pvc_jobs - value: registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526 + value: registry.access.redhat.com/ubi8-minimal:8.4-208 - name: RELATED_IMAGE_postgres value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392 - name: RELATED_IMAGE_keycloak @@ -1093,6 +955,8 @@ spec: value: che-postgres-secret - name: CHE_SERVER_TRUST_STORE_CONFIGMAP_NAME value: ca-certs + - name: MAX_CONCURRENT_RECONCILES + value: "1" image: quay.io/eclipse/che-operator:next imagePullPolicy: Always livenessProbe: @@ -1132,49 +996,6 @@ spec: privileged: false readOnlyRootFilesystem: false runAsNonRoot: true - - args: - - --enable-leader-election - - --metrics-addr - - "0" - command: - - /usr/local/bin/devworkspace-che-operator - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.annotations['olm.targetNamespaces'] - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: devworkspace-che-operator - - name: MAX_CONCURRENT_RECONCILES - value: "1" - - name: CONTROLLER_SERVICE_ACCOUNT_NAME - value: che-operator - - name: RELATED_IMAGE_gateway - value: 
quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23 - - name: RELATED_IMAGE_gateway_configurer - value: quay.io/che-incubator/configbump:0.1.4 - image: quay.io/che-incubator/devworkspace-che-operator:ci - imagePullPolicy: Always - name: devworkspace-che-operator - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 32Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: false - runAsNonRoot: true hostIPC: false hostNetwork: false hostPID: false @@ -1245,8 +1066,10 @@ spec: - get - apiGroups: - apps + - extensions resources: - deployments + - replicasets verbs: - '*' - apiGroups: @@ -1281,12 +1104,6 @@ spec: - get - list - watch - - apiGroups: - - che.eclipse.org - resources: - - kubernetesimagepullers - verbs: - - '*' - apiGroups: - operators.coreos.com resources: @@ -1305,29 +1122,58 @@ spec: - apiGroups: - "" resources: - - configmaps + - configmaps/status verbs: - get - - list - - watch - - create - update - patch - - delete - apiGroups: - "" resources: - - configmaps/status + - events + verbs: + - create + - apiGroups: + - apps + resourceNames: + - che-operator + resources: + - deployments/finalizers verbs: - - get - update + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings + verbs: + - '*' + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/finalizers + verbs: + - update + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/status + verbs: + - get - patch + - update - apiGroups: - - "" + - oauth.openshift.io resources: - - events + - oauthclients verbs: - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -1390,4 +1236,4 @@ spec: maturity: stable provider: name: Eclipse Foundation - version: 7.35.0-276.next + version: 7.35.0-279.next diff --git 
a/bundle/stable/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml b/bundle/stable/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml index 68204e2f2..299749dcb 100644 --- a/bundle/stable/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/stable/eclipse-che-preview-kubernetes/manifests/che-operator.clusterserviceversion.yaml @@ -802,14 +802,6 @@ spec: - services verbs: - '*' - - apiGroups: - - apps - resourceNames: - - devworkspace-che-operator - resources: - - deployments/finalizers - verbs: - - update - apiGroups: - apps - extensions @@ -1006,8 +998,6 @@ spec: value: quay.io/eclipse/che--traefik@sha256:df90799aaca1ad6fb9e06d311140035d2a0c2295a4f8f508f6b55ee056bb677e - name: RELATED_IMAGE_single_host_gateway_config_sidecar value: quay.io/che-incubator/configbump@sha256:175ff2ba1bd74429de192c0a9facf39da5699c6da9f151bd461b3dc8624dd532 - - name: RELATED_IMAGE_devworkspace_che_operator - value: quay.io/che-incubator/devworkspace-che-operator@sha256:f943ada4d07ae8375f5a93bcc57f7f66335b14940bfe2c5d9565d155588ef514 - name: RELATED_IMAGE_devworkspace_controller value: quay.io/devfile/devworkspace-controller@sha256:f17dad6df3f2f0f7b245e05677293bef1d35a17e0349002f9e47816de03c0cdd - name: RELATED_IMAGE_internal_rest_backup_server diff --git a/bundle/stable/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml b/bundle/stable/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml index 74bc34967..4a17cb221 100644 --- a/bundle/stable/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml +++ b/bundle/stable/eclipse-che-preview-openshift/manifests/che-operator.clusterserviceversion.yaml @@ -858,14 +858,6 @@ spec: - services verbs: - '*' - - apiGroups: - - apps - resourceNames: - - devworkspace-che-operator - resources: - - deployments/finalizers - verbs: - - update - apiGroups: - apps - extensions @@ 
-1060,8 +1052,6 @@ spec: value: quay.io/eclipse/che--traefik@sha256:df90799aaca1ad6fb9e06d311140035d2a0c2295a4f8f508f6b55ee056bb677e - name: RELATED_IMAGE_single_host_gateway_config_sidecar value: quay.io/che-incubator/configbump@sha256:175ff2ba1bd74429de192c0a9facf39da5699c6da9f151bd461b3dc8624dd532 - - name: RELATED_IMAGE_devworkspace_che_operator - value: quay.io/che-incubator/devworkspace-che-operator@sha256:f943ada4d07ae8375f5a93bcc57f7f66335b14940bfe2c5d9565d155588ef514 - name: RELATED_IMAGE_devworkspace_controller value: quay.io/devfile/devworkspace-controller@sha256:f17dad6df3f2f0f7b245e05677293bef1d35a17e0349002f9e47816de03c0cdd - name: RELATED_IMAGE_internal_rest_backup_server diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 34dad4c1a..14ea6615a 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -1,3 +1,13 @@ +# +# Copyright (c) 2019-2021 Red Hat, Inc. +# This program and the accompanying materials are made +# available under the terms of the Eclipse Public License 2.0 +# which is available at https://www.eclipse.org/legal/epl-2.0/ +# +# SPDX-License-Identifier: EPL-2.0 +# +# Contributors: +# Red Hat, Inc. 
- initial API and implementation apiVersion: apps/v1 kind: Deployment metadata: @@ -58,7 +68,7 @@ spec: - name: RELATED_IMAGE_che_tls_secrets_creation_job value: quay.io/eclipse/che-tls-secret-creator:alpine-d1ed4ad - name: RELATED_IMAGE_pvc_jobs - value: registry.access.redhat.com/ubi8-minimal:8.4-205.1626828526 + value: registry.access.redhat.com/ubi8-minimal:8.4-208 - name: RELATED_IMAGE_postgres value: quay.io/eclipse/che--centos--postgresql-96-centos7:9.6-b681d78125361519180a6ac05242c296f8906c11eab7e207b5ca9a89b6344392 - name: RELATED_IMAGE_keycloak @@ -105,6 +115,8 @@ spec: value: che-postgres-secret - name: CHE_SERVER_TRUST_STORE_CONFIGMAP_NAME value: ca-certs + - name: MAX_CONCURRENT_RECONCILES + value: "1" livenessProbe: httpGet: path: /healthz @@ -136,47 +148,6 @@ spec: requests: cpu: 100m memory: 64Mi - - args: - - --enable-leader-election - - --metrics-addr - - '0' - command: - - /usr/local/bin/devworkspace-che-operator - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: devworkspace-che-operator - - name: MAX_CONCURRENT_RECONCILES - value: "1" - - name: CONTROLLER_SERVICE_ACCOUNT_NAME - value: che-operator - - name: RELATED_IMAGE_gateway - value: quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23 - - name: RELATED_IMAGE_gateway_configurer - value: quay.io/che-incubator/configbump:0.1.4 - image: quay.io/che-incubator/devworkspace-che-operator:ci - imagePullPolicy: Always - name: devworkspace-che-operator - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 100m - memory: 32Mi - securityContext: - privileged: false - readOnlyRootFilesystem: false - capabilities: - drop: - - ALL hostIPC: false hostNetwork: false hostPID: false diff --git a/config/rbac/cluster_role.yaml b/config/rbac/cluster_role.yaml index 5f04e3682..1f77cbba4 100644 --- 
a/config/rbac/cluster_role.yaml +++ b/config/rbac/cluster_role.yaml @@ -31,8 +31,9 @@ rules: - oauthclients verbs: - create - - get - delete + - deletecollection + - get - list - patch - update @@ -83,16 +84,6 @@ rules: - rbac.authorization.k8s.io resources: - clusterrolebindings - verbs: - - list - - create - - watch - - update - - get - - delete - - apiGroups: - - rbac.authorization.k8s.io - resources: - clusterroles verbs: - list @@ -102,17 +93,9 @@ rules: - get - delete - apiGroups: - - authorization.openshift.io + - rbac.authorization.k8s.io resources: - roles - verbs: - - get - - create - - update - - delete - - apiGroups: - - authorization.openshift.io - resources: - rolebindings verbs: - get @@ -120,17 +103,9 @@ rules: - update - delete - apiGroups: - - rbac.authorization.k8s.io + - authorization.openshift.io resources: - roles - verbs: - - get - - create - - update - - delete - - apiGroups: - - rbac.authorization.k8s.io - resources: - rolebindings verbs: - get @@ -144,6 +119,14 @@ rules: - checlusters/status - checlusters/finalizers - checlusters/status + - checlusterbackups + - checlusterbackups/status + - checlusterbackups/finalizers + - checlusterrestores + - checlusterrestores/status + - backupserverconfigurations + - backupserverconfigurations/status + - chebackupserverconfigurations verbs: - '*' - apiGroups: @@ -169,14 +152,6 @@ rules: - list - create - update - - apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - get - - create - - watch - apiGroups: - '' resources: @@ -222,40 +197,24 @@ rules: - delete - apiGroups: - apps + - extensions resources: - deployments + - replicasets verbs: - - get - - list - - create - - patch - - watch - - delete - - apiGroups: - - '' - resources: - - services - verbs: - - list - - create - - delete + - '*' - apiGroups: - - '' + - route.openshift.io resources: - - configmaps + - routes verbs: - - get - - create - - delete - - list + - '*' - apiGroups: - route.openshift.io resources: - - routes + - 
routes/custom-host verbs: - - list - create - - delete - apiGroups: - '' resources: @@ -276,11 +235,7 @@ rules: resources: - ingresses verbs: - - list - - create - - watch - - get - - delete + - '*' - apiGroups: - networking.k8s.io resources: @@ -305,6 +260,14 @@ rules: - subscriptions verbs: - get + - apiGroups: + - operators.coreos.com + resources: + - clusterserviceversions + verbs: + - list + - get + - watch - apiGroups: - metrics.k8s.io resources: @@ -324,6 +287,72 @@ rules: - get - list - update + - apiGroups: + - '' + resources: + - configmaps + - persistentvolumeclaims + - pods + - secrets + - serviceaccounts + - services + verbs: + - '*' + - apiGroups: + - apps + resourceNames: + - che-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - update + - watch + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings + verbs: + - '*' + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/finalizers + verbs: + - update + - apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/status + verbs: + - get + - patch + - update + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get + - nonResourceURLs: + - /metrics + verbs: + - get + - apiGroups: + - che.eclipse.org + resources: + - kubernetesimagepullers + verbs: + - '*' ### CHE-OPERATOR ROLES ONLY: END # devworkspace-controller-view-workspaces.ClusterRole.yaml - apiGroups: @@ -594,172 +623,3 @@ rules: - get - list - watch - # devworkspace-che-role.ClusterRole.yaml - - apiGroups: - - "" - resources: - - configmaps - - persistentvolumeclaims - - pods - - secrets - - serviceaccounts - verbs: - - '*' - - apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - apiGroups: - - "" - resources: - - services - verbs: - - '*' - - 
apiGroups: - - apps - resourceNames: - - devworkspace-che-operator - resources: - - deployments/finalizers - verbs: - - update - - apiGroups: - - apps - - extensions - resources: - - deployments - verbs: - - get - - list - - watch - - apiGroups: - - apps - - extensions - resources: - - deployments - - replicasets - verbs: - - '*' - - apiGroups: - - apps - - extensions - resources: - - replicasets - verbs: - - get - - list - - watch - - apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - update - - watch - - apiGroups: - - org.eclipse.che - resources: - - checlusters - - checlusters/status - - checlusters/finalizers - verbs: - - '*' - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings - verbs: - - '*' - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings/finalizers - verbs: - - update - - apiGroups: - - controller.devfile.io - resources: - - devworkspaceroutings/status - verbs: - - get - - patch - - update - - apiGroups: - - "" - resources: - - configmap - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - extensions - resources: - - ingresses - verbs: - - '*' - - apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - create - - get - - apiGroups: - - oauth.openshift.io - resources: - - oauthclients - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - - clusterroles - - rolebindings - - roles - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - route.openshift.io - resources: - - routes - verbs: - - '*' - - apiGroups: - - route.openshift.io - resources: - - routes/custom-host - verbs: - - create - # devworkspace-che-metrics-reader.ClusterRole.yaml - - nonResourceURLs: - - /metrics - verbs: - - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml 
index 6e6e4f083..f3a0129bb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -82,8 +82,10 @@ rules: - get - apiGroups: - apps + - extensions resources: - deployments + - replicasets verbs: - '*' - apiGroups: @@ -118,12 +120,6 @@ rules: - get - list - watch -- apiGroups: - - che.eclipse.org - resources: - - kubernetesimagepullers - verbs: - - '*' - apiGroups: - operators.coreos.com resources: @@ -139,31 +135,60 @@ rules: verbs: - get - list -### CHE-OPERATOR ROLES ONLY: END -# devworkspace-che-leader-election-role.Role.yaml - apiGroups: - "" resources: - - configmaps + - configmaps/status verbs: - get - - list - - watch - - create - update - patch - - delete - apiGroups: - "" resources: - - configmaps/status + - events + verbs: + - create +- apiGroups: + - apps + resourceNames: + - che-operator + resources: + - deployments/finalizers verbs: - - get - update +- apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings + verbs: + - '*' +- apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/finalizers + verbs: + - update +- apiGroups: + - controller.devfile.io + resources: + - devworkspaceroutings/status + verbs: + - get - patch + - update - apiGroups: - - "" + - oauth.openshift.io resources: - - events + - oauthclients verbs: - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + +### CHE-OPERATOR ROLES ONLY: END diff --git a/config/samples/devworkspace_flattened_theia-nodejs.yaml b/config/samples/devworkspace_flattened_theia-nodejs.yaml new file mode 100644 index 000000000..1575367fa --- /dev/null +++ b/config/samples/devworkspace_flattened_theia-nodejs.yaml @@ -0,0 +1,100 @@ +kind: DevWorkspace +apiVersion: workspace.devfile.io/v1alpha2 +metadata: + name: theia-next +spec: + started: true + routingClass: che + template: + projects: + - name: web-nodejs-sample + git: + remotes: + origin: "https://github.com/che-samples/web-nodejs-sample.git" + components: + ### BEGIN 
Contributions from Theia plugin ### + - name: plugins + volume: {} + - name: theia-ide + attributes: + "app.kubernetes.io/name": che-theia.eclipse.org + "app.kubernetes.io/part-of": che.eclipse.org + "app.kubernetes.io/component": editor + container: + image: "quay.io/eclipse/che-theia:next" + env: + - name: THEIA_PLUGINS + value: local-dir:///plugins + - name: HOSTED_PLUGIN_HOSTNAME + value: 0.0.0.0 + - name: HOSTED_PLUGIN_PORT + value: "3130" + - name: THEIA_HOST + value: 0.0.0.0 + volumeMounts: + - path: "/plugins" + name: plugins + mountSources: true + memoryLimit: "512M" + endpoints: + - name: "theia" + exposure: public + targetPort: 3100 + secure: true + protocol: http + attributes: + type: ide + - name: "webviews" + exposure: public + targetPort: 3100 + protocol: http + secure: true + attributes: + type: webview + unique: "true" + - name: "theia-dev" + exposure: public + targetPort: 3130 + protocol: http + attributes: + type: ide-dev + - name: "theia-redir-1" + exposure: public + targetPort: 13131 + protocol: http + - name: "theia-redir-2" + exposure: public + targetPort: 13132 + protocol: http + - name: "theia-redir-3" + exposure: public + targetPort: 13133 + protocol: http + - name: che-theia-terminal + attributes: + "app.kubernetes.io/name": che-theia.eclipse.org + "app.kubernetes.io/part-of": che.eclipse.org + "app.kubernetes.io/component": che-theia-terminal + container: + image: "quay.io/eclipse/che-machine-exec:nightly" + command: ['/go/bin/che-machine-exec'] + args: + - '--url' + - '0.0.0.0:3333' + - '--pod-selector' + - controller.devfile.io/devworkspace_id=$(DEVWORKSPACE_ID) + endpoints: + - name: "che-theia-terminal" + exposure: public + targetPort: 3333 + protocol: ws + secure: true + attributes: + type: collocated-terminal + ### END Contributions from che-theia plugin ### + commands: + - id: say-hello + exec: + component: plugin + commandLine: echo "Hello from $(pwd)" + workingDir: ${PROJECTS_ROOT}/project/app diff --git 
a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 6297e13f3..b6c1ff8b6 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -6,4 +6,6 @@ resources: # - org_v1_chebackupserverconfiguration.yaml # - org_v1_checlusterbackup.yaml # - org_v1_checlusterrestore.yaml +# Uncomment to enable a devworkspace sample +# - devworkspace_flattened_theia-nodejs.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/controllers/che/checluster_controller.go b/controllers/che/checluster_controller.go index bdcca6d44..06e27afe2 100644 --- a/controllers/che/checluster_controller.go +++ b/controllers/che/checluster_controller.go @@ -94,10 +94,12 @@ type CheClusterReconciler struct { tests bool userHandler OpenShiftOAuthUserHandler permissionChecker PermissionChecker + // the namespace to which to limit the reconciliation. If empty, all namespaces are considered + namespace string } // NewReconciler returns a new CheClusterReconciler -func NewReconciler(mgr ctrl.Manager) (*CheClusterReconciler, error) { +func NewReconciler(mgr ctrl.Manager, namespace string) (*CheClusterReconciler, error) { noncachedClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: mgr.GetScheme()}) if err != nil { return nil, err @@ -115,6 +117,7 @@ func NewReconciler(mgr ctrl.Manager) (*CheClusterReconciler, error) { discoveryClient: discoveryClient, userHandler: NewOpenShiftOAuthUserHandler(noncachedClient), permissionChecker: &K8sApiPermissionChecker{}, + namespace: namespace, }, nil } @@ -214,6 +217,10 @@ func (r *CheClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { }) } + if r.namespace != "" { + contollerBuilder.WithEventFilter(util.InNamespaceEventFilter(r.namespace)) + } + return contollerBuilder. For(&orgv1.CheCluster{}). 
Complete(r) diff --git a/controllers/checlusterbackup/checlusterbackup_controller.go b/controllers/checlusterbackup/checlusterbackup_controller.go index 85574fcbc..c0167522f 100644 --- a/controllers/checlusterbackup/checlusterbackup_controller.go +++ b/controllers/checlusterbackup/checlusterbackup_controller.go @@ -18,6 +18,7 @@ import ( "time" chev1 "github.com/eclipse-che/che-operator/api/v1" + "github.com/eclipse-che/che-operator/pkg/util" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -44,11 +45,13 @@ type ReconcileCheClusterBackup struct { // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme + // the namespace to which to limit the reconciliation. If empty, all namespaces are considered + namespace string } // NewReconciler returns a new reconcile.Reconciler -func NewReconciler(mgr manager.Manager) *ReconcileCheClusterBackup { - return &ReconcileCheClusterBackup{client: mgr.GetClient(), scheme: mgr.GetScheme()} +func NewReconciler(mgr manager.Manager, namespace string) *ReconcileCheClusterBackup { + return &ReconcileCheClusterBackup{client: mgr.GetClient(), scheme: mgr.GetScheme(), namespace: namespace} } // SetupWithManager sets up the controller with the Manager. @@ -69,9 +72,15 @@ func (r *ReconcileCheClusterBackup) SetupWithManager(mgr ctrl.Manager) error { }, } - return ctrl.NewControllerManagedBy(mgr). + bldr := ctrl.NewControllerManagedBy(mgr). Named("checlusterbackup-controller"). - Watches(&source.Kind{Type: &chev1.CheClusterBackup{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(backupCRPredicate)). + Watches(&source.Kind{Type: &chev1.CheClusterBackup{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(backupCRPredicate)) + + if r.namespace != "" { + bldr.WithEventFilter(util.InNamespaceEventFilter(r.namespace)) + } + + return bldr. For(&chev1.CheClusterBackup{}). 
Complete(r) } diff --git a/controllers/checlusterrestore/checlusterrestore_controller.go b/controllers/checlusterrestore/checlusterrestore_controller.go index a1e818e79..3f12d8b3a 100644 --- a/controllers/checlusterrestore/checlusterrestore_controller.go +++ b/controllers/checlusterrestore/checlusterrestore_controller.go @@ -18,6 +18,7 @@ import ( "time" chev1 "github.com/eclipse-che/che-operator/api/v1" + "github.com/eclipse-che/che-operator/pkg/util" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -42,11 +43,13 @@ type ReconcileCheClusterRestore struct { // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme + // the namespace to which to limit the reconciliation. If empty, all namespaces are considered + namespace string } // NewReconciler returns a new reconcile.Reconciler -func NewReconciler(mgr manager.Manager) *ReconcileCheClusterRestore { - return &ReconcileCheClusterRestore{client: mgr.GetClient(), scheme: mgr.GetScheme()} +func NewReconciler(mgr manager.Manager, namespace string) *ReconcileCheClusterRestore { + return &ReconcileCheClusterRestore{client: mgr.GetClient(), scheme: mgr.GetScheme(), namespace: namespace} } func (r *ReconcileCheClusterRestore) SetupWithManager(mgr ctrl.Manager) error { @@ -66,9 +69,15 @@ func (r *ReconcileCheClusterRestore) SetupWithManager(mgr ctrl.Manager) error { }, } - return ctrl.NewControllerManagedBy(mgr). + bldr := ctrl.NewControllerManagedBy(mgr). Named("checlusterrestore-controller"). - Watches(&source.Kind{Type: &chev1.CheClusterRestore{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(restoreCRPredicate)). + Watches(&source.Kind{Type: &chev1.CheClusterRestore{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(restoreCRPredicate)) + + if r.namespace != "" { + bldr.WithEventFilter(util.InNamespaceEventFilter(r.namespace)) + } + + return bldr. For(&chev1.CheClusterRestore{}). 
Complete(r) } diff --git a/controllers/devworkspace/README.adoc b/controllers/devworkspace/README.adoc new file mode 100644 index 000000000..82005db06 --- /dev/null +++ b/controllers/devworkspace/README.adoc @@ -0,0 +1,12 @@ += Devworkspace Che controller + +This is an import of originally standalone Devworkspace Che operator. +As such many things, that could be shared or reused with/from the rest +of the che-operator codebase, aren't. + +This situation will hopefully improve over time as we integrate the two +codebases more and more. + +In particular, the `controller/devworkspace/sync` subpackage is more +or less identical to `deploy/sync` and should be replaced by `deploy/sync` +after a careful inspection, if possible. diff --git a/controllers/devworkspace/controller.go b/controllers/devworkspace/controller.go new file mode 100644 index 000000000..ee532ba26 --- /dev/null +++ b/controllers/devworkspace/controller.go @@ -0,0 +1,459 @@ +// +// Copyright (c) 2019-2020 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +package devworkspace + +import ( + "context" + "encoding/hex" + stdErrors "errors" + "fmt" + "math/rand" + "reflect" + "strings" + "sync" + "time" + + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + checluster "github.com/eclipse-che/che-operator/api" + checlusterv1 "github.com/eclipse-che/che-operator/api/v1" + "github.com/eclipse-che/che-operator/api/v2alpha1" + "github.com/eclipse-che/che-operator/controllers/devworkspace/defaults" + datasync "github.com/eclipse-che/che-operator/controllers/devworkspace/sync" + "github.com/eclipse-che/che-operator/pkg/deploy" + "github.com/eclipse-che/che-operator/pkg/util" + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + rbac "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + log = ctrl.Log.WithName("che") + currentCheInstances = map[client.ObjectKey]v2alpha1.CheCluster{} + cheInstancesAccess = sync.Mutex{} +) + +const ( + // FinalizerName is the name of the finalizer put on the Che Cluster resources by the controller. Public for testing purposes. + FinalizerName = "checluster.che.eclipse.org" +) + +type CheClusterReconciler struct { + client client.Client + scheme *runtime.Scheme + syncer datasync.Syncer +} + +// GetCurrentCheClusterInstances returns a map of all che clusters (keyed by their namespaced name) +// the che cluster controller currently knows of. This returns any meaningful data +// only after reconciliation has taken place. +// +// If this method is called from another controller, it effectively couples that controller +// with the che manager controller. 
Such controller will therefore have to run in the same +// process as the che manager controller. On the other hand, using this method, and somehow +// tolerating its eventual consistency, makes the other controller more efficient such that +// it doesn't have to find the che managers in the cluster (which is what che manager reconciler +// is doing). +// +// If need be, this method can be replaced by a simply calling client.List to get all the che +// managers in the cluster. +func GetCurrentCheClusterInstances() map[client.ObjectKey]v2alpha1.CheCluster { + cheInstancesAccess.Lock() + defer cheInstancesAccess.Unlock() + + ret := map[client.ObjectKey]v2alpha1.CheCluster{} + + for k, v := range currentCheInstances { + ret[k] = v + } + + return ret +} + +// New returns a new instance of the Che manager reconciler. This is mainly useful for +// testing because it doesn't set up any watches in the cluster, etc. For that use SetupWithManager. +func New(cl client.Client, scheme *runtime.Scheme) CheClusterReconciler { + return CheClusterReconciler{ + client: cl, + scheme: scheme, + syncer: datasync.New(cl, scheme), + } +} + +func (r *CheClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.client = mgr.GetClient() + r.scheme = mgr.GetScheme() + r.syncer = datasync.New(r.client, r.scheme) + + bld := ctrl.NewControllerManagedBy(mgr). + For(&checlusterv1.CheCluster{}). + Owns(&corev1.Service{}). + Owns(&v1beta1.Ingress{}). + Owns(&corev1.ConfigMap{}). + Owns(&appsv1.Deployment{}). + Owns(&corev1.Pod{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbac.Role{}). 
+ Owns(&rbac.RoleBinding{}) + if infrastructure.IsOpenShift() { + bld.Owns(&routev1.Route{}) + } + return bld.Complete(r) +} + +func (r *CheClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + + cheInstancesAccess.Lock() + defer cheInstancesAccess.Unlock() + + // remove the manager from the shared map for the time of the reconciliation + // we'll add it back if it is successfully reconciled. + // The access to the map is locked for the time of reconciliation so that outside + // callers don't witness this intermediate state. + delete(currentCheInstances, req.NamespacedName) + + // make sure we've checked we're in a valid state + currentV1 := &checlusterv1.CheCluster{} + err := r.client.Get(ctx, req.NamespacedName, currentV1) + if err != nil { + if errors.IsNotFound(err) { + // Ok, our current router disappeared... + return ctrl.Result{}, nil + } + // other error - let's requeue + return ctrl.Result{}, err + } + + current := checluster.AsV2alpha1(currentV1) + + if current.GetDeletionTimestamp() != nil { + return ctrl.Result{}, r.finalize(ctx, current, currentV1) + } + + var disabledMessage string + + if !r.scheme.IsGroupRegistered("controller.devfile.io") { + disabledMessage = "Devworkspace CRDs are not installed" + } + + if disabledMessage == "" && !current.Spec.IsEnabled() { + disabledMessage = "Devworkspace Che is disabled" + } + + if disabledMessage != "" { + res, err := r.updateStatus(ctx, current, currentV1, nil, current.Status.GatewayHost, current.Status.WorkspaceBaseDomain, v2alpha1.ClusterPhaseInactive, disabledMessage) + if err != nil { + return res, err + } + + currentV1 = &checlusterv1.CheCluster{} + _ = r.client.Get(ctx, req.NamespacedName, currentV1) + + return res, nil + } + + finalizerUpdated, err := r.ensureFinalizer(ctx, current) + if err != nil { + log.Info("Failed to set a finalizer", "object", req.String()) + return ctrl.Result{}, err + } else if finalizerUpdated { + // we've updated the object 
with a new finalizer, so we will enter another reconciliation loop shortly + // we don't add the manager into the shared map just yet, because we have actually not reconciled it fully. + return ctrl.Result{}, nil + } + + // validate the CR + err = r.validate(current) + if err != nil { + log.Info("validation errors", "errors", err.Error()) + res, err := r.updateStatus(ctx, current, currentV1, nil, current.Status.GatewayHost, current.Status.WorkspaceBaseDomain, v2alpha1.ClusterPhaseInactive, err.Error()) + if err != nil { + return res, err + } + + return res, nil + } + + // now, finally, the actual reconciliation + var changed bool + var host string + + // We are no longer in charge of the gateway, leaving the responsibility for managing it on the che-operator. + // But we need to detect the hostname on which the gateway is exposed so that the rest of our subsystems work. + host, err = r.detectCheHost(ctx, currentV1) + if err != nil { + return ctrl.Result{}, err + } + + // setting changed to false, because we jump from inactive directly to established, because we are no longer in + // control of gateway creation + changed = false + + workspaceBaseDomain := current.Spec.WorkspaceDomainEndpoints.BaseDomain + + if workspaceBaseDomain == "" { + workspaceBaseDomain, err = r.detectOpenShiftRouteBaseDomain(current) + if err != nil { + return ctrl.Result{}, err + } + + if workspaceBaseDomain == "" { + res, err := r.updateStatus(ctx, current, currentV1, nil, current.Status.GatewayHost, current.Status.WorkspaceBaseDomain, v2alpha1.ClusterPhaseInactive, "Could not auto-detect the workspaceBaseDomain. 
Please set it explicitly in the spec.") + if err != nil { + return res, err + } + + return res, nil + } + } + + res, err := r.updateStatus(ctx, current, currentV1, &changed, host, workspaceBaseDomain, v2alpha1.ClusterPhaseActive, "") + + if err != nil { + return res, err + } + + // everything went fine and the manager exists, put it back in the shared map + currentCheInstances[req.NamespacedName] = *current + + return res, nil +} + +func (r *CheClusterReconciler) updateStatus(ctx context.Context, cluster *v2alpha1.CheCluster, v1Cluster *checlusterv1.CheCluster, changed *bool, host string, workspaceDomain string, phase v2alpha1.ClusterPhase, phaseMessage string) (ctrl.Result, error) { + currentPhase := cluster.Status.GatewayPhase + + if changed != nil { + if !cluster.Spec.Gateway.IsEnabled() { + cluster.Status.GatewayPhase = v2alpha1.GatewayPhaseInactive + } else if *changed { + cluster.Status.GatewayPhase = v2alpha1.GatewayPhaseInitializing + } else { + cluster.Status.GatewayPhase = v2alpha1.GatewayPhaseEstablished + } + } + + cluster.Status.GatewayHost = host + cluster.Status.WorkspaceBaseDomain = workspaceDomain + + // set this unconditionally, because the only other value is set using the finalizer + cluster.Status.Phase = phase + cluster.Status.Message = phaseMessage + + var err error + if !reflect.DeepEqual(v1Cluster.Status.DevworkspaceStatus, cluster.Status) { + v1Cluster.Status.DevworkspaceStatus = cluster.Status + err = r.client.Status().Update(ctx, v1Cluster) + } + + requeue := cluster.Spec.IsEnabled() && (currentPhase == v2alpha1.GatewayPhaseInitializing || + cluster.Status.Phase != v2alpha1.ClusterPhaseActive) + + return ctrl.Result{Requeue: requeue}, err +} + +func (r *CheClusterReconciler) validate(cluster *v2alpha1.CheCluster) error { + validationErrors := []string{} + + if !infrastructure.IsOpenShift() { + // The validation error messages must correspond to the storage version of the resource, which is currently + // v1... 
+ if cluster.Spec.WorkspaceDomainEndpoints.BaseDomain == "" { + validationErrors = append(validationErrors, "spec.k8s.ingressDomain must be specified") + } + } + + if len(validationErrors) > 0 { + message := "The following validation errors were detected:\n" + for _, m := range validationErrors { + message += "- " + m + "\n" + } + + return stdErrors.New(message) + } + + return nil +} + +func (r *CheClusterReconciler) finalize(ctx context.Context, cluster *v2alpha1.CheCluster, v1Cluster *checlusterv1.CheCluster) (err error) { + err = r.gatewayConfigFinalize(ctx, cluster) + + if err == nil { + finalizers := []string{} + for i := range cluster.Finalizers { + if cluster.Finalizers[i] != FinalizerName { + finalizers = append(finalizers, cluster.Finalizers[i]) + } + } + + cluster.Finalizers = finalizers + + err = r.client.Update(ctx, checluster.AsV1(cluster)) + } else { + cluster.Status.Phase = v2alpha1.ClusterPhasePendingDeletion + cluster.Status.Message = fmt.Sprintf("Finalization has failed: %s", err.Error()) + + v1Cluster.Status.DevworkspaceStatus = cluster.Status + err = r.client.Status().Update(ctx, v1Cluster) + } + + return err +} + +func (r *CheClusterReconciler) ensureFinalizer(ctx context.Context, cluster *v2alpha1.CheCluster) (updated bool, err error) { + + needsUpdate := true + if cluster.Finalizers != nil { + for i := range cluster.Finalizers { + if cluster.Finalizers[i] == FinalizerName { + needsUpdate = false + break + } + } + } else { + cluster.Finalizers = []string{} + } + + if needsUpdate { + cluster.Finalizers = append(cluster.Finalizers, FinalizerName) + err = r.client.Update(ctx, checluster.AsV1(cluster)) + } + + return needsUpdate, err +} + +// Tries to autodetect the route base domain. 
+func (r *CheClusterReconciler) detectOpenShiftRouteBaseDomain(cluster *v2alpha1.CheCluster) (string, error) { + if !infrastructure.IsOpenShift() { + return "", nil + } + + name := "devworkspace-che-test-" + randomSuffix(8) + testRoute := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: name, + }, + Spec: routev1.RouteSpec{ + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: name, + }, + }, + } + + err := r.client.Create(context.TODO(), testRoute) + if err != nil { + return "", err + } + defer r.client.Delete(context.TODO(), testRoute) + host := testRoute.Spec.Host + + prefixToRemove := name + "-" + cluster.Namespace + "." + return strings.TrimPrefix(host, prefixToRemove), nil +} + +func randomSuffix(length int) string { + var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + + arr := make([]byte, (length+1)/2) // to make even-length array so that it is convertible to hex + rnd.Read(arr) + + return hex.EncodeToString(arr) +} + +func (r *CheClusterReconciler) detectCheHost(ctx context.Context, cluster *checlusterv1.CheCluster) (string, error) { + host := cluster.Spec.Server.CheHost + + if host == "" { + expectedLabels := deploy.GetLabels(cluster, deploy.DefaultCheFlavor(cluster)) + lbls := labels.SelectorFromSet(expectedLabels) + + if util.IsOpenShift { + list := routev1.RouteList{} + err := r.client.List(ctx, &list, &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: lbls, + }) + + if err != nil { + return "", err + } + + if len(list.Items) == 0 { + return "", fmt.Errorf("expecting exactly 1 route to match Che gateway labels but found %d", len(list.Items)) + } + + host = list.Items[0].Spec.Host + } else { + list := v1beta1.IngressList{} + err := r.client.List(ctx, &list, &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: lbls, + }) + + if err != nil { + return "", err + } + + if len(list.Items) == 0 { + return "", fmt.Errorf("expecting exactly 1 ingress to match Che 
gateway labels but found %d", len(list.Items)) + } + + if len(list.Items[0].Spec.Rules) != 1 { + return "", fmt.Errorf("expecting exactly 1 rule on the Che gateway ingress but found %d. This is a bug", len(list.Items[0].Spec.Rules)) + } + + host = list.Items[0].Spec.Rules[0].Host + } + } + + return host, nil +} + +// Checks that there are no devworkspace configurations for the gateway (which would mean running devworkspaces). +// If there are some, an error is returned. +func (r *CheClusterReconciler) gatewayConfigFinalize(ctx context.Context, cluster *v2alpha1.CheCluster) error { + // we need to stop the reconcile if there are devworkspaces handled by it. + // we detect that by the presence of the gateway configmaps in the namespace of the manager + list := corev1.ConfigMapList{} + + err := r.client.List(ctx, &list, &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: labels.SelectorFromSet(defaults.GetLabelsForComponent(cluster, "gateway-config")), + }) + if err != nil { + return err + } + + workspaceCount := 0 + + for _, c := range list.Items { + if c.Annotations[defaults.ConfigAnnotationCheManagerName] == cluster.Name && c.Annotations[defaults.ConfigAnnotationCheManagerNamespace] == cluster.Namespace { + workspaceCount++ + } + } + + if workspaceCount > 0 { + return fmt.Errorf("there are %d devworkspaces associated with this Che manager", workspaceCount) + } + + return nil +} diff --git a/controllers/devworkspace/controller_test.go b/controllers/devworkspace/controller_test.go new file mode 100644 index 000000000..ff7b5ed9e --- /dev/null +++ b/controllers/devworkspace/controller_test.go @@ -0,0 +1,576 @@ +package devworkspace + +import ( + "context" + "os" + "testing" + "time" + + dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + checluster "github.com/eclipse-che/che-operator/api" + v1 "github.com/eclipse-che/che-operator/api/v1" + 
"github.com/eclipse-che/che-operator/api/v2alpha1" + "github.com/eclipse-che/che-operator/controllers/devworkspace/defaults" + "github.com/eclipse-che/che-operator/controllers/devworkspace/sync" + "github.com/eclipse-che/che-operator/pkg/deploy" + "github.com/eclipse-che/che-operator/pkg/util" + + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/api/node/v1alpha1" + rbac "k8s.io/api/rbac/v1" + "k8s.io/utils/pointer" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func createTestScheme() *runtime.Scheme { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + + scheme := runtime.NewScheme() + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(extensions.AddToScheme(scheme)) + utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(rbac.AddToScheme(scheme)) + utilruntime.Must(routev1.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(dwo.AddToScheme(scheme)) + + return scheme +} + +func TestNoCustomResourceSharedWhenReconcilingNonExistent(t *testing.T) { + // clear the map before the test + for k := range currentCheInstances { + delete(currentCheInstances, k) + } + + managerName := "che" + ns := "default" + scheme := createTestScheme() + cl := fake.NewFakeClientWithScheme(scheme) + + ctx := context.TODO() + + reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)} + + _, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile 
che manager with error: %s", err) + } + + // there is nothing in our context, so the map should still be empty + managers := GetCurrentCheClusterInstances() + if len(managers) != 0 { + t.Fatalf("There should have been no managers after a reconcile of a non-existent manager.") + } + + // now add some manager and reconcile a non-existent one + cl.Create(ctx, asV1(&v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managerName + "-not-me", + Namespace: ns, + Finalizers: []string{FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "over.the.rainbow", + Enabled: pointer.BoolPtr(false), + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + }, + })) + + _, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + managers = GetCurrentCheClusterInstances() + if len(managers) != 0 { + t.Fatalf("There should have been no managers after a reconcile of a non-existent manager.") + } +} + +func TestAddsCustomResourceToSharedMapOnCreate(t *testing.T) { + // clear the map before the test + for k := range currentCheInstances { + delete(currentCheInstances, k) + } + + managerName := "che" + ns := "default" + scheme := createTestScheme() + cl := fake.NewFakeClientWithScheme(scheme, asV1(&v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managerName, + Namespace: ns, + Finalizers: []string{FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "over.the.rainbow", + Enabled: pointer.BoolPtr(false), + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + }, + })) + + reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)} + + _, err := reconciler.Reconcile(reconcile.Request{NamespacedName: 
types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + managers := GetCurrentCheClusterInstances() + if len(managers) != 1 { + t.Fatalf("There should have been exactly 1 manager after a reconcile but there is %d.", len(managers)) + } + + mgr, ok := managers[types.NamespacedName{Name: managerName, Namespace: ns}] + if !ok { + t.Fatalf("The map of the current managers doesn't contain the expected one.") + } + + if mgr.Name != managerName { + t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName) + } +} + +func TestUpdatesCustomResourceInSharedMapOnUpdate(t *testing.T) { + // clear the map before the test + for k := range currentCheInstances { + delete(currentCheInstances, k) + } + + managerName := "che" + ns := "default" + scheme := createTestScheme() + + cl := fake.NewFakeClientWithScheme(scheme, asV1(&v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managerName, + Namespace: ns, + Finalizers: []string{FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Enabled: pointer.BoolPtr(false), + Host: "over.the.rainbow", + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + }, + })) + + reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)} + + _, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + managers := GetCurrentCheClusterInstances() + if len(managers) != 1 { + t.Fatalf("There should have been exactly 1 manager after a reconcile but there is %d.", len(managers)) + } + + mgr, ok := managers[types.NamespacedName{Name: managerName, Namespace: ns}] + if !ok { + t.Fatalf("The map of the current managers 
doesn't contain the expected one.") + } + + if mgr.Name != managerName { + t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName) + } + + if mgr.Spec.Gateway.Host != "over.the.rainbow" { + t.Fatalf("Unexpected host value: expected: over.the.rainbow, actual: %s", mgr.Spec.Gateway.Host) + } + + // now update the manager and reconcile again. See that the map contains the updated value + mgrInCluster := v1.CheCluster{} + cl.Get(context.TODO(), client.ObjectKey{Name: managerName, Namespace: ns}, &mgrInCluster) + + // to be able to update, we need to set the resource version + mgr.SetResourceVersion(mgrInCluster.GetResourceVersion()) + + mgr.Spec.Gateway.Host = "over.the.shoulder" + err = cl.Update(context.TODO(), asV1(&mgr)) + if err != nil { + t.Fatalf("Failed to update. Wat? %s", err) + } + + // before the reconcile, the map still should containe the old value + managers = GetCurrentCheClusterInstances() + mgr, ok = managers[types.NamespacedName{Name: managerName, Namespace: ns}] + if !ok { + t.Fatalf("The map of the current managers doesn't contain the expected one.") + } + + if mgr.Name != managerName { + t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). 
We found %s but should have found %s", mgr.Name, managerName) + } + + if mgr.Spec.Gateway.Host != "over.the.rainbow" { + t.Fatalf("Unexpected host value: expected: over.the.rainbow, actual: %s", mgr.Spec.Gateway.Host) + } + + // now reconcile and see that the value in the map is now updated + + _, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + managers = GetCurrentCheClusterInstances() + mgr, ok = managers[types.NamespacedName{Name: managerName, Namespace: ns}] + if !ok { + t.Fatalf("The map of the current managers doesn't contain the expected one.") + } + + if mgr.Name != managerName { + t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName) + } + + if mgr.Spec.Gateway.Host != "over.the.shoulder" { + t.Fatalf("Unexpected host value: expected: over.the.shoulder, actual: %s", mgr.Spec.Gateway.Host) + } +} + +func TestRemovesCustomResourceFromSharedMapOnDelete(t *testing.T) { + // clear the map before the test + for k := range currentCheInstances { + delete(currentCheInstances, k) + } + + managerName := "che" + ns := "default" + scheme := createTestScheme() + + cl := fake.NewFakeClientWithScheme(scheme, asV1(&v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managerName, + Namespace: ns, + Finalizers: []string{FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "over.the.rainbow", + Enabled: pointer.BoolPtr(false), + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + }, + })) + + reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)} + + _, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + 
t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + managers := GetCurrentCheClusterInstances() + if len(managers) != 1 { + t.Fatalf("There should have been exactly 1 manager after a reconcile but there is %d.", len(managers)) + } + + mgr, ok := managers[types.NamespacedName{Name: managerName, Namespace: ns}] + if !ok { + t.Fatalf("The map of the current managers doesn't contain the expected one.") + } + + if mgr.Name != managerName { + t.Fatalf("Found a manager that we didn't reconcile. Curious (and buggy). We found %s but should have found %s", mgr.Name, managerName) + } + + cl.Delete(context.TODO(), asV1(&mgr)) + + // now reconcile and see that the value is no longer in the map + + _, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + managers = GetCurrentCheClusterInstances() + _, ok = managers[types.NamespacedName{Name: managerName, Namespace: ns}] + if ok { + t.Fatalf("The map of the current managers should no longer contain the manager after it has been deleted.") + } +} + +func TestCustomResourceFinalization(t *testing.T) { + managerName := "che" + ns := "default" + scheme := createTestScheme() + ctx := context.TODO() + cl := fake.NewFakeClientWithScheme(scheme, + asV1(&v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managerName, + Namespace: ns, + Finalizers: []string{FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "over.the.rainbow", + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + }, + }), + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ws1", + Namespace: ns, + Annotations: map[string]string{ + defaults.ConfigAnnotationCheManagerName: managerName, + defaults.ConfigAnnotationCheManagerNamespace: ns, + }, + Labels: 
defaults.GetLabelsFromNames(managerName, "gateway-config"), + }, + }) + + reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)} + + _, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + // check that the reconcile loop added the finalizer + manager := v1.CheCluster{} + err = cl.Get(ctx, client.ObjectKey{Name: managerName, Namespace: ns}, &manager) + if err != nil { + t.Fatalf("Failed to obtain the manager from the fake client: %s", err) + } + + if len(manager.Finalizers) != 1 { + t.Fatalf("Expected a single finalizer on the manager but found: %d", len(manager.Finalizers)) + } + + if manager.Finalizers[0] != FinalizerName { + t.Fatalf("Expected a finalizer called %s but got %s", FinalizerName, manager.Finalizers[0]) + } + + // try to delete the manager and check that the configmap disallows that and that the status of the manager is updated + manager.DeletionTimestamp = &metav1.Time{Time: time.Now()} + err = cl.Update(ctx, &manager) + if err != nil { + t.Fatalf("Failed to update the manager in the fake client: %s", err) + } + _, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + manager = v1.CheCluster{} + err = cl.Get(ctx, client.ObjectKey{Name: managerName, Namespace: ns}, &manager) + if err != nil { + t.Fatalf("Failed to obtain the manager from the fake client: %s", err) + } + + if len(manager.Finalizers) != 1 { + t.Fatalf("There should have been a finalizer on the manager after a failed finalization attempt") + } + + if manager.Status.DevworkspaceStatus.Phase != v2alpha1.ClusterPhasePendingDeletion { + t.Fatalf("Expected the manager to be in the pending deletion phase but it is: %s", 
manager.Status.DevworkspaceStatus.Phase) + } + if len(manager.Status.DevworkspaceStatus.Message) == 0 { + t.Fatalf("Expected an non-empty message about the failed finalization in the manager status") + } + + // now remove the config map and check that the finalization proceeds + err = cl.Delete(ctx, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ws1", + Namespace: ns, + }, + }) + if err != nil { + t.Fatalf("Failed to delete the test configmap: %s", err) + } + + _, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: managerName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + manager = v1.CheCluster{} + err = cl.Get(ctx, client.ObjectKey{Name: managerName, Namespace: ns}, &manager) + if err != nil { + t.Fatalf("Failed to obtain the manager from the fake client: %s", err) + } + + if len(manager.Finalizers) != 0 { + t.Fatalf("The finalizers should be cleared after the finalization success but there were still some: %d", len(manager.Finalizers)) + } +} + +// This test should be removed if we are again in charge of gateway creation. 
+func TestExternalGatewayDetection(t *testing.T) { + origFlavor := os.Getenv("CHE_FLAVOR") + t.Cleanup(func() { + os.Setenv("CHE_FLAVOR", origFlavor) + }) + + os.Setenv("CHE_FLAVOR", "test-che") + + scheme := createTestScheme() + + clusterName := "eclipse-che" + ns := "default" + + v2cluster := &v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: ns, + }, + Spec: v2alpha1.CheClusterSpec{ + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + }, + } + + onKubernetes(func() { + v1Cluster := asV1(v2cluster) + + cl := fake.NewFakeClientWithScheme(scheme, + v1Cluster, + &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress", + Namespace: ns, + Labels: deploy.GetLabels(v1Cluster, "test-che"), + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{ + { + Host: "ingress.host", + }, + }, + }, + }, + ) + + reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)} + + // first reconcile sets the finalizer, second reconcile actually finishes the process + _, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + _, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + persisted := v1.CheCluster{} + if err := cl.Get(context.TODO(), types.NamespacedName{Name: clusterName, Namespace: ns}, &persisted); err != nil { + t.Fatal(err) + } + + if persisted.Status.DevworkspaceStatus.Phase != v2alpha1.ClusterPhaseActive { + t.Fatalf("Unexpected cluster state: %v", persisted.Status.DevworkspaceStatus.Phase) + } + + if persisted.Status.DevworkspaceStatus.GatewayHost != "ingress.host" { + t.Fatalf("Unexpected gateway host: %v", 
persisted.Status.DevworkspaceStatus.GatewayHost) + } + }) + + onOpenShift(func() { + v1Cluster := asV1(v2cluster) + + cl := fake.NewFakeClientWithScheme(scheme, + v1Cluster, + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "route", + Namespace: ns, + Labels: deploy.GetLabels(v1Cluster, "test-che"), + }, + Spec: routev1.RouteSpec{ + Host: "route.host", + }, + }, + ) + + reconciler := CheClusterReconciler{client: cl, scheme: scheme, syncer: sync.New(cl, scheme)} + + // first reconcile sets the finalizer, second reconcile actually finishes the process + _, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + _, err = reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName, Namespace: ns}}) + if err != nil { + t.Fatalf("Failed to reconcile che manager with error: %s", err) + } + + persisted := v1.CheCluster{} + if err := cl.Get(context.TODO(), types.NamespacedName{Name: clusterName, Namespace: ns}, &persisted); err != nil { + t.Fatal(err) + } + + if persisted.Status.DevworkspaceStatus.Phase != v2alpha1.ClusterPhaseActive { + t.Fatalf("Unexpected cluster state: %v", persisted.Status.DevworkspaceStatus.Phase) + } + + if persisted.Status.DevworkspaceStatus.GatewayHost != "route.host" { + t.Fatalf("Unexpected gateway host: %v", persisted.Status.DevworkspaceStatus.GatewayHost) + } + }) +} + +func asV1(v2Obj *v2alpha1.CheCluster) *v1.CheCluster { + return checluster.AsV1(v2Obj) +} + +func onKubernetes(f func()) { + isOpenShift := util.IsOpenShift + isOpenShift4 := util.IsOpenShift4 + + util.IsOpenShift = false + util.IsOpenShift4 = false + + f() + + util.IsOpenShift = isOpenShift + util.IsOpenShift4 = isOpenShift4 +} + +func onOpenShift(f func()) { + isOpenShift := util.IsOpenShift + isOpenShift4 := util.IsOpenShift4 + + util.IsOpenShift = true + util.IsOpenShift4 = true + + 
f() + + util.IsOpenShift = isOpenShift + util.IsOpenShift4 = isOpenShift4 +} diff --git a/controllers/devworkspace/defaults/defaults.go b/controllers/devworkspace/defaults/defaults.go new file mode 100644 index 000000000..a2a473dc0 --- /dev/null +++ b/controllers/devworkspace/defaults/defaults.go @@ -0,0 +1,107 @@ +package defaults + +import ( + "os" + "runtime" + + "github.com/eclipse-che/che-operator/api/v2alpha1" + ctrl "sigs.k8s.io/controller-runtime" +) + +const ( + gatewayImageEnvVarName = "RELATED_IMAGE_gateway" + gatewayConfigurerImageEnvVarName = "RELATED_IMAGE_gateway_configurer" + + defaultGatewayImage = "quay.io/eclipse/che--traefik:v2.3.2-6e6d4dc5a19afe06778ca092cdbbb98e31cb9f9c313edafa23f81a0e6ddf8a23" + defaultGatewayConfigurerImage = "quay.io/che-incubator/configbump:0.1.4" + + configAnnotationPrefix = "che.routing.controller.devfile.io/" + ConfigAnnotationCheManagerName = configAnnotationPrefix + "che-name" + ConfigAnnotationCheManagerNamespace = configAnnotationPrefix + "che-namespace" + ConfigAnnotationDevWorkspaceRoutingName = configAnnotationPrefix + "devworkspacerouting-name" + ConfigAnnotationDevWorkspaceRoutingNamespace = configAnnotationPrefix + "devworkspacerouting-namespace" + ConfigAnnotationEndpointName = configAnnotationPrefix + "endpoint-name" + ConfigAnnotationComponentName = configAnnotationPrefix + "component-name" +) + +var ( + log = ctrl.Log.WithName("defaults") + + DefaultIngressAnnotations = map[string]string{ + "kubernetes.io/ingress.class": "nginx", + "nginx.ingress.kubernetes.io/proxy-read-timeout": "3600", + "nginx.ingress.kubernetes.io/proxy-connect-timeout": "3600", + "nginx.ingress.kubernetes.io/ssl-redirect": "true", + } + + // If this looks weirdly out of place to you from all other labels, then you're completely right! + // These labels are the default ones used by che-operator and Che7. Let's keep the defaults + // the same for the ease of translation... 
+ defaultGatewayConfigLabels = map[string]string{ + "app": "che", + "component": "che-gateway-config", + } +) + +func GetGatewayWorkpaceConfigMapName(workspaceID string) string { + return workspaceID +} + +func GetLabelsForComponent(cluster *v2alpha1.CheCluster, component string) map[string]string { + return GetLabelsFromNames(cluster.Name, component) +} + +func GetLabelsFromNames(appName string, component string) map[string]string { + return AddStandardLabelsFromNames(appName, component, map[string]string{}) +} + +func AddStandardLabelsForComponent(cluster *v2alpha1.CheCluster, component string, labels map[string]string) map[string]string { + return AddStandardLabelsFromNames(cluster.Name, component, labels) +} + +func AddStandardLabelsFromNames(appName string, component string, labels map[string]string) map[string]string { + labels["app.kubernetes.io/name"] = appName + labels["app.kubernetes.io/part-of"] = appName + labels["app.kubernetes.io/component"] = component + return labels +} + +func GetGatewayImage() string { + return read(gatewayImageEnvVarName, defaultGatewayImage) +} + +func GetGatewayConfigurerImage() string { + return read(gatewayConfigurerImageEnvVarName, defaultGatewayConfigurerImage) +} + +func GetIngressAnnotations(cluster *v2alpha1.CheCluster) map[string]string { + if len(cluster.Spec.K8s.IngressAnnotations) > 0 { + return cluster.Spec.K8s.IngressAnnotations + } + return DefaultIngressAnnotations +} + +func GetGatewayWorkspaceConfigMapLabels(cluster *v2alpha1.CheCluster) map[string]string { + if len(cluster.Spec.Gateway.ConfigLabels) > 0 { + return cluster.Spec.Gateway.ConfigLabels + } + return defaultGatewayConfigLabels +} + +func read(varName string, fallback string) string { + ret := os.Getenv(varName) + + if len(ret) == 0 { + ret = os.Getenv(archDependent(varName)) + if len(ret) == 0 { + log.Info("Failed to read the default value from the environment. 
Will use the hardcoded default value.", "envvar", varName, "value", fallback) + ret = fallback + } + } + + return ret +} + +func archDependent(envVarName string) string { + return envVarName + "_" + runtime.GOARCH +} diff --git a/controllers/devworkspace/solver/che_routing.go b/controllers/devworkspace/solver/che_routing.go new file mode 100644 index 000000000..6364874c8 --- /dev/null +++ b/controllers/devworkspace/solver/che_routing.go @@ -0,0 +1,519 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package solver + +import ( + "context" + "fmt" + "path" + "strings" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers" + "github.com/devfile/devworkspace-operator/pkg/common" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + "github.com/eclipse-che/che-operator/api/v2alpha1" + "github.com/eclipse-che/che-operator/controllers/devworkspace/defaults" + "github.com/eclipse-che/che-operator/controllers/devworkspace/sync" + "github.com/google/go-cmp/cmp/cmpopts" + routeV1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +const ( + uniqueEndpointAttributeName = "unique" + urlRewriteSupportedEndpointAttributeName = "urlRewriteSupported" + endpointURLPrefixPattern = "/%s/%s/%d" + // note - che-theia DEPENDS on this format - we 
should not change this unless crosschecked with the che-theia impl + uniqueEndpointURLPrefixPattern = "/%s/%s/%s" +) + +var ( + configMapDiffOpts = cmpopts.IgnoreFields(corev1.ConfigMap{}, "TypeMeta", "ObjectMeta") +) + +// keys are port numbers, values are maps where keys are endpoint names (in case we need more than 1 endpoint for a single port) and values +// contain info about the intended endpoint scheme and the order in which the port is defined (used for unique naming) +type portMapping map[int32]map[string]portMappingValue +type portMappingValue struct { + endpointScheme string + order int +} + +func (c *CheRoutingSolver) cheSpecObjects(cheManager *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, workspaceMeta solvers.DevWorkspaceMetadata) (solvers.RoutingObjects, error) { + objs := solvers.RoutingObjects{} + + objs.Services = solvers.GetDiscoverableServicesForEndpoints(routing.Spec.Endpoints, workspaceMeta) + + commonService := solvers.GetServiceForEndpoints(routing.Spec.Endpoints, workspaceMeta, false, dw.PublicEndpointExposure, dw.InternalEndpointExposure) + if commonService != nil { + objs.Services = append(objs.Services, *commonService) + } + + annos := map[string]string{} + annos[defaults.ConfigAnnotationCheManagerName] = cheManager.Name + annos[defaults.ConfigAnnotationCheManagerNamespace] = cheManager.Namespace + + additionalLabels := defaults.GetLabelsForComponent(cheManager, "exposure") + + for i := range objs.Services { + // need to use a ref otherwise s would be a copy + s := &objs.Services[i] + + if s.Labels == nil { + s.Labels = map[string]string{} + } + + for k, v := range additionalLabels { + + if len(s.Labels[k]) == 0 { + s.Labels[k] = v + } + } + + if s.Annotations == nil { + s.Annotations = map[string]string{} + } + + for k, v := range annos { + + if len(s.Annotations[k]) == 0 { + s.Annotations[k] = v + } + } + } + + // k, now we have to create our own objects for configuring the gateway + configMaps, err := 
c.getGatewayConfigsAndFillRoutingObjects(cheManager, workspaceMeta.DevWorkspaceId, routing, &objs) + if err != nil { + return solvers.RoutingObjects{}, err + } + + syncer := sync.New(c.client, c.scheme) + + for _, cm := range configMaps { + _, _, err := syncer.Sync(context.TODO(), nil, &cm, configMapDiffOpts) + if err != nil { + return solvers.RoutingObjects{}, err + } + + } + + return objs, nil +} + +func (c *CheRoutingSolver) cheExposedEndpoints(manager *v2alpha1.CheCluster, workspaceID string, endpoints map[string]dwo.EndpointList, routingObj solvers.RoutingObjects) (exposedEndpoints map[string]dwo.ExposedEndpointList, ready bool, err error) { + if manager.Status.GatewayPhase == v2alpha1.GatewayPhaseInitializing { + return nil, false, nil + } + + gatewayHost := manager.Status.GatewayHost + + exposed := map[string]dwo.ExposedEndpointList{} + + for machineName, endpoints := range endpoints { + exposedEndpoints := dwo.ExposedEndpointList{} + for _, endpoint := range endpoints { + if endpoint.Exposure != dw.PublicEndpointExposure { + continue + } + + scheme := determineEndpointScheme(manager.Spec.Gateway.IsEnabled(), endpoint) + + if !isExposableScheme(scheme) { + // we cannot expose non-http endpoints publicly, because ingresses/routes only support http(s) + continue + } + + // try to find the endpoint in the ingresses/routes first. 
If it is there, it is exposed on a subdomain + // otherwise it is exposed through the gateway + var endpointURL string + if infrastructure.IsOpenShift() { + route := findRouteForEndpoint(machineName, endpoint, &routingObj) + if route != nil { + endpointURL = path.Join(route.Spec.Host, endpoint.Path) + } + } else { + ingress := findIngressForEndpoint(machineName, endpoint, &routingObj) + if ingress != nil { + endpointURL = path.Join(ingress.Spec.Rules[0].Host, endpoint.Path) + } + } + + if endpointURL == "" { + if !manager.Spec.Gateway.IsEnabled() { + return map[string]dwo.ExposedEndpointList{}, false, fmt.Errorf("couldn't find an ingress/route for an endpoint `%s` in workspace `%s`, this is a bug", endpoint.Name, workspaceID) + } + + if gatewayHost == "" { + // the gateway has not yet established the host + return map[string]dwo.ExposedEndpointList{}, false, nil + } + + publicURLPrefix := getPublicURLPrefixForEndpoint(workspaceID, machineName, endpoint) + endpointURL = path.Join(gatewayHost, publicURLPrefix, endpoint.Path) + } + + publicURL := scheme + "://" + endpointURL + + // path.Join() removes the trailing slashes, so make sure to reintroduce that if required. 
+ if endpoint.Path == "" || strings.HasSuffix(endpoint.Path, "/") { + publicURL = publicURL + "/" + } + + exposedEndpoints = append(exposedEndpoints, dwo.ExposedEndpoint{ + Name: endpoint.Name, + Url: publicURL, + Attributes: endpoint.Attributes, + }) + } + exposed[machineName] = exposedEndpoints + } + + return exposed, true, nil +} + +func isExposableScheme(scheme string) bool { + return strings.HasPrefix(scheme, "http") || strings.HasPrefix(scheme, "ws") +} + +func secureScheme(scheme string) string { + if scheme == "http" { + return "https" + } else if scheme == "ws" { + return "wss" + } else { + return scheme + } +} + +func isSecureScheme(scheme string) bool { + return scheme == "https" || scheme == "wss" +} + +func (c *CheRoutingSolver) getGatewayConfigsAndFillRoutingObjects(cheManager *v2alpha1.CheCluster, workspaceID string, routing *dwo.DevWorkspaceRouting, objs *solvers.RoutingObjects) ([]corev1.ConfigMap, error) { + restrictedAnno, setRestrictedAnno := routing.Annotations[constants.DevWorkspaceRestrictedAccessAnnotation] + + labels := defaults.AddStandardLabelsForComponent(cheManager, "gateway-config", defaults.GetGatewayWorkspaceConfigMapLabels(cheManager)) + labels[constants.DevWorkspaceIDLabel] = workspaceID + if setRestrictedAnno { + labels[constants.DevWorkspaceRestrictedAccessAnnotation] = restrictedAnno + } + + configMap := corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: defaults.GetGatewayWorkpaceConfigMapName(workspaceID), + Namespace: cheManager.Namespace, + Labels: labels, + Annotations: map[string]string{ + defaults.ConfigAnnotationDevWorkspaceRoutingName: routing.Name, + defaults.ConfigAnnotationDevWorkspaceRoutingNamespace: routing.Namespace, + }, + }, + Data: map[string]string{}, + } + + config := traefikConfig{ + HTTP: traefikConfigHTTP{ + Routers: map[string]traefikConfigRouter{}, + Services: map[string]traefikConfigService{}, + Middlewares: map[string]traefikConfigMiddleware{}, + }, + } + + // we just need something to make the 
route names unique.. We also need to make the names as short as possible while + // being relatable to the workspaceID by mere human inspection. So let's just suffix the workspaceID with a "unique" + // suffix, the easiest of which is the iteration order in the map. + // Note that this means that the endpoints might get a different route/ingress name on each workspace start because + // the iteration order is not guaranteed in Go maps. If we want stable ingress/route names for the endpoints, we need + // to devise a different algorithm to produce them. Some kind of hash of workspaceID, component name, endpoint name and port + // might work but will not be relatable to the workspace ID just by looking at it anymore. + order := 0 + if infrastructure.IsOpenShift() { + exposer := &RouteExposer{} + if err := exposer.initFrom(context.TODO(), c.client, cheManager, routing); err != nil { + return []corev1.ConfigMap{}, err + } + + exposeAllEndpoints(&order, cheManager, routing, &config, objs, func(info *EndpointInfo) { + route := exposer.getRouteForService(info) + objs.Routes = append(objs.Routes, route) + }) + } else { + exposer := &IngressExposer{} + if err := exposer.initFrom(context.TODO(), c.client, cheManager, routing, defaults.GetIngressAnnotations(cheManager)); err != nil { + return []corev1.ConfigMap{}, err + } + + exposeAllEndpoints(&order, cheManager, routing, &config, objs, func(info *EndpointInfo) { + ingress := exposer.getIngressForService(info) + objs.Ingresses = append(objs.Ingresses, ingress) + }) + } + + if len(config.HTTP.Routers) > 0 { + contents, err := yaml.Marshal(config) + if err != nil { + return []corev1.ConfigMap{}, err + } + + configMap.Data[workspaceID+".yml"] = string(contents) + + return []corev1.ConfigMap{configMap}, nil + } + + return []corev1.ConfigMap{}, nil +} + +func exposeAllEndpoints(order *int, cheManager *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, config *traefikConfig, objs *solvers.RoutingObjects, ingressExpose 
func(*EndpointInfo)) { + info := &EndpointInfo{} + for componentName, endpoints := range routing.Spec.Endpoints { + info.componentName = componentName + singlehostPorts, multihostPorts := classifyEndpoints(cheManager.Spec.Gateway.IsEnabled(), order, &endpoints) + + addToTraefikConfig(routing.Namespace, routing.Spec.DevWorkspaceId, componentName, singlehostPorts, config) + + for port, names := range multihostPorts { + backingService := findServiceForPort(port, objs) + for endpointName, val := range names { + info.endpointName = endpointName + info.order = val.order + info.port = port + info.scheme = val.endpointScheme + info.service = backingService + + ingressExpose(info) + } + } + } +} + +func getTrackedEndpointName(endpoint *dw.Endpoint) string { + name := "" + if endpoint.Attributes.GetString(uniqueEndpointAttributeName, nil) == "true" { + name = endpoint.Name + } + + return name +} + +// we need to support unique endpoints - so 1 port can actually be accessible +// multiple times, each time using a different resulting external URL. 
+// non-unique endpoints are all represented using a single external URL +func classifyEndpoints(gatewayEnabled bool, order *int, endpoints *dwo.EndpointList) (singlehostPorts portMapping, multihostPorts portMapping) { + singlehostPorts = portMapping{} + multihostPorts = portMapping{} + for _, e := range *endpoints { + if e.Exposure != dw.PublicEndpointExposure { + continue + } + + i := int32(e.TargetPort) + + name := "" + if e.Attributes.GetString(uniqueEndpointAttributeName, nil) == "true" { + name = e.Name + } + + ports := multihostPorts + if gatewayEnabled && e.Attributes.GetString(urlRewriteSupportedEndpointAttributeName, nil) == "true" { + ports = singlehostPorts + } + + if ports[i] == nil { + ports[i] = map[string]portMappingValue{} + } + + if _, ok := ports[i][name]; !ok { + ports[i][name] = portMappingValue{ + order: *order, + endpointScheme: determineEndpointScheme(gatewayEnabled, e), + } + *order = *order + 1 + } + } + + return +} + +func addToTraefikConfig(namespace string, workspaceID string, machineName string, portMapping portMapping, cfg *traefikConfig) { + rtrs := cfg.HTTP.Routers + srvcs := cfg.HTTP.Services + mdls := cfg.HTTP.Middlewares + + for port, names := range portMapping { + for endpointName := range names { + name := getEndpointExposingObjectName(machineName, workspaceID, port, endpointName) + var prefix string + var serviceURL string + + prefix = getPublicURLPrefix(workspaceID, machineName, port, endpointName) + serviceURL = getServiceURL(port, workspaceID, namespace) + + rtrs[name] = traefikConfigRouter{ + Rule: fmt.Sprintf("PathPrefix(`%s`)", prefix), + Service: name, + Middlewares: []string{name}, + Priority: 100, + } + + srvcs[name] = traefikConfigService{ + LoadBalancer: traefikConfigLoadbalancer{ + Servers: []traefikConfigLoadbalancerServer{ + { + URL: serviceURL, + }, + }, + }, + } + + mdls[name] = traefikConfigMiddleware{ + StripPrefix: traefikConfigStripPrefix{ + Prefixes: []string{prefix}, + }, + } + } + } +} + +func 
findServiceForPort(port int32, objs *solvers.RoutingObjects) *corev1.Service {
	for i := range objs.Services {
		svc := &objs.Services[i]
		for j := range svc.Spec.Ports {
			if svc.Spec.Ports[j].Port == port {
				return svc
			}
		}
	}

	return nil
}

// findIngressForEndpoint looks up, among the ingresses generated for this routing,
// the one that exposes the given endpoint of the given component, matching by the
// component/endpoint annotations and the backend service port.
// Returns nil when no such ingress exists.
func findIngressForEndpoint(machineName string, endpoint dw.Endpoint, objs *solvers.RoutingObjects) *v1beta1.Ingress {
	for i := range objs.Ingresses {
		ingress := &objs.Ingresses[i]

		if ingress.Annotations[defaults.ConfigAnnotationComponentName] != machineName ||
			ingress.Annotations[defaults.ConfigAnnotationEndpointName] != getTrackedEndpointName(&endpoint) {
			continue
		}

		for r := range ingress.Spec.Rules {
			rule := ingress.Spec.Rules[r]
			// rule.HTTP is a pointer in the ingress API and may be absent;
			// guard it instead of panicking on a malformed rule.
			if rule.HTTP == nil {
				continue
			}
			for p := range rule.HTTP.Paths {
				path := rule.HTTP.Paths[p]
				if path.Backend.ServicePort.IntVal == int32(endpoint.TargetPort) {
					return ingress
				}
			}
		}
	}

	return nil
}

// findRouteForEndpoint looks up, among the routes generated for this routing, the
// one that exposes the given endpoint of the given component.
// Returns nil when no such route exists.
func findRouteForEndpoint(machineName string, endpoint dw.Endpoint, objs *solvers.RoutingObjects) *routeV1.Route {
	service := findServiceForPort(int32(endpoint.TargetPort), objs)
	// ROBUSTNESS FIX: findServiceForPort can return nil when no service exposes
	// this port; the original dereferenced service.Name unconditionally.
	if service == nil {
		return nil
	}

	for r := range objs.Routes {
		route := &objs.Routes[r]
		if route.Annotations[defaults.ConfigAnnotationComponentName] == machineName &&
			route.Annotations[defaults.ConfigAnnotationEndpointName] == getTrackedEndpointName(&endpoint) &&
			route.Spec.To.Kind == "Service" &&
			route.Spec.To.Name == service.Name &&
			// route.Spec.Port is optional in the route API; guard the deref.
			route.Spec.Port != nil &&
			route.Spec.Port.TargetPort.IntValue() == endpoint.TargetPort {
			return route
		}
	}

	return nil
}

// cheRoutingFinalize cleans up the gateway configmaps that were created for the
// workspace of the provided routing (they live in the che manager's namespace,
// labeled with the workspace ID, so they are not garbage-collected with the routing).
func (c *CheRoutingSolver) cheRoutingFinalize(cheManager *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting) error {
	configs := &corev1.ConfigMapList{}

	selector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId))
	if err != nil {
		return err
	}

	listOpts := &client.ListOptions{
		Namespace:     cheManager.Namespace,
		LabelSelector: selector,
	}

	err = c.client.List(context.TODO(), configs, listOpts)
	if err != nil {
return err + } + + for _, cm := range configs.Items { + err = c.client.Delete(context.TODO(), &cm) + if err != nil { + return err + } + } + + return nil +} + +func getServiceURL(port int32, workspaceID string, workspaceNamespace string) string { + // the default .cluster.local suffix of the internal domain names seems to be configurable, so let's just + // not use it so we don't have to know about it... + return fmt.Sprintf("http://%s.%s.svc:%d", common.ServiceName(workspaceID), workspaceNamespace, port) +} + +func getPublicURLPrefixForEndpoint(workspaceID string, machineName string, endpoint dw.Endpoint) string { + endpointName := "" + if endpoint.Attributes.GetString(uniqueEndpointAttributeName, nil) == "true" { + endpointName = endpoint.Name + } + + return getPublicURLPrefix(workspaceID, machineName, int32(endpoint.TargetPort), endpointName) +} + +func getPublicURLPrefix(workspaceID string, machineName string, port int32, uniqueEndpointName string) string { + if uniqueEndpointName == "" { + return fmt.Sprintf(endpointURLPrefixPattern, workspaceID, machineName, port) + } + return fmt.Sprintf(uniqueEndpointURLPrefixPattern, workspaceID, machineName, uniqueEndpointName) +} + +func determineEndpointScheme(gatewayEnabled bool, e dw.Endpoint) string { + var scheme string + if e.Protocol == "" { + scheme = "http" + } else { + scheme = string(e.Protocol) + } + + upgradeToSecure := e.Secure + + // gateway is always on HTTPS, so if the endpoint is served through the gateway, we need to use the TLS'd variant. 
+ if gatewayEnabled && e.Attributes.GetString(urlRewriteSupportedEndpointAttributeName, nil) == "true" { + upgradeToSecure = true + } + + if upgradeToSecure { + scheme = secureScheme(scheme) + } + + return scheme +} diff --git a/controllers/devworkspace/solver/che_routing_test.go b/controllers/devworkspace/solver/che_routing_test.go new file mode 100644 index 000000000..82486abef --- /dev/null +++ b/controllers/devworkspace/solver/che_routing_test.go @@ -0,0 +1,622 @@ +package solver + +import ( + "context" + "fmt" + "strings" + "testing" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/api/v2/pkg/attributes" + dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + org "github.com/eclipse-che/che-operator/api" + v1 "github.com/eclipse-che/che-operator/api/v1" + "github.com/eclipse-che/che-operator/api/v2alpha1" + controller "github.com/eclipse-che/che-operator/controllers/devworkspace" + "github.com/eclipse-che/che-operator/controllers/devworkspace/defaults" + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + rbac "k8s.io/api/rbac/v1" + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" +) + +func createTestScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + utilruntime.Must(extensions.AddToScheme(scheme)) + 
utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(rbac.AddToScheme(scheme)) + utilruntime.Must(dw.AddToScheme(scheme)) + utilruntime.Must(dwo.AddToScheme(scheme)) + utilruntime.Must(routev1.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + + return scheme +} + +func getSpecObjectsForManager(t *testing.T, mgr *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, additionalInitialObjects ...runtime.Object) (client.Client, solvers.RoutingSolver, solvers.RoutingObjects) { + scheme := createTestScheme() + + allObjs := []runtime.Object{asV1(mgr)} + for i := range additionalInitialObjects { + allObjs = append(allObjs, additionalInitialObjects[i]) + } + cl := fake.NewFakeClientWithScheme(scheme, allObjs...) + + solver, err := Getter(scheme).GetSolver(cl, "che") + if err != nil { + t.Fatal(err) + } + + meta := solvers.DevWorkspaceMetadata{ + DevWorkspaceId: routing.Spec.DevWorkspaceId, + Namespace: routing.GetNamespace(), + PodSelector: routing.Spec.PodSelector, + } + + // we need to do 1 round of che manager reconciliation so that the solver gets initialized + cheRecon := controller.New(cl, scheme) + _, err = cheRecon.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: mgr.Name, Namespace: mgr.Namespace}}) + if err != nil { + t.Fatal(err) + } + + objs, err := solver.GetSpecObjects(routing, meta) + if err != nil { + t.Fatal(err) + } + + // now we need a second round of che manager reconciliation so that it proclaims the che gateway as established + cheRecon.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Name: "che", Namespace: "ns"}}) + + return cl, solver, objs +} + +func getSpecObjects(t *testing.T, routing *dwo.DevWorkspaceRouting) (client.Client, solvers.RoutingSolver, solvers.RoutingObjects) { + return getSpecObjectsForManager(t, &v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "che", + Namespace: "ns", + Finalizers: 
[]string{controller.FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "over.the.rainbow", + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + }, + }, routing) +} + +func subdomainDevWorkspaceRouting() *dwo.DevWorkspaceRouting { + return &dwo.DevWorkspaceRouting{ + ObjectMeta: metav1.ObjectMeta{ + Name: "routing", + Namespace: "ws", + }, + Spec: dwo.DevWorkspaceRoutingSpec{ + DevWorkspaceId: "wsid", + RoutingClass: "che", + Endpoints: map[string]dwo.EndpointList{ + "m1": { + { + Name: "e1", + TargetPort: 9999, + Exposure: dw.PublicEndpointExposure, + Protocol: "https", + Path: "/1/", + }, + { + Name: "e2", + TargetPort: 9999, + Exposure: dw.PublicEndpointExposure, + Protocol: "http", + Path: "/2.js", + Secure: true, + }, + { + Name: "e3", + TargetPort: 9999, + Exposure: dw.PublicEndpointExposure, + }, + }, + }, + }, + } +} + +func relocatableDevWorkspaceRouting() *dwo.DevWorkspaceRouting { + return &dwo.DevWorkspaceRouting{ + ObjectMeta: metav1.ObjectMeta{ + Name: "routing", + Namespace: "ws", + }, + Spec: dwo.DevWorkspaceRoutingSpec{ + DevWorkspaceId: "wsid", + RoutingClass: "che", + Endpoints: map[string]dwo.EndpointList{ + "m1": { + { + Name: "e1", + TargetPort: 9999, + Exposure: dw.PublicEndpointExposure, + Protocol: "https", + Path: "/1/", + Attributes: attributes.Attributes{ + urlRewriteSupportedEndpointAttributeName: apiext.JSON{Raw: []byte("\"true\"")}, + }, + }, + { + Name: "e2", + TargetPort: 9999, + Exposure: dw.PublicEndpointExposure, + Protocol: "http", + Path: "/2.js", + Secure: true, + Attributes: attributes.Attributes{ + urlRewriteSupportedEndpointAttributeName: apiext.JSON{Raw: []byte("\"true\"")}, + }, + }, + { + Name: "e3", + TargetPort: 9999, + Exposure: dw.PublicEndpointExposure, + Attributes: attributes.Attributes{ + urlRewriteSupportedEndpointAttributeName: apiext.JSON{Raw: []byte("\"true\"")}, + }, + }, + }, + }, + }, + } +} + +func 
TestCreateRelocatedObjects(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + cl, _, objs := getSpecObjects(t, relocatableDevWorkspaceRouting()) + + t.Run("noIngresses", func(t *testing.T) { + if len(objs.Ingresses) != 0 { + t.Error() + } + }) + + t.Run("noRoutes", func(t *testing.T) { + if len(objs.Routes) != 0 { + t.Error() + } + }) + + t.Run("noPodAdditions", func(t *testing.T) { + if objs.PodAdditions != nil { + t.Error() + } + }) + + for i := range objs.Services { + t.Run(fmt.Sprintf("service-%d", i), func(t *testing.T) { + svc := &objs.Services[i] + if svc.Annotations[defaults.ConfigAnnotationCheManagerName] != "che" { + t.Errorf("The name of the associated che manager should have been recorded in the service annotation") + } + + if svc.Annotations[defaults.ConfigAnnotationCheManagerNamespace] != "ns" { + t.Errorf("The namespace of the associated che manager should have been recorded in the service annotation") + } + + if svc.Labels[constants.DevWorkspaceIDLabel] != "wsid" { + t.Errorf("The workspace ID should be recorded in the service labels") + } + }) + } + + t.Run("traefikConfig", func(t *testing.T) { + cms := &corev1.ConfigMapList{} + cl.List(context.TODO(), cms) + + if len(cms.Items) != 1 { + t.Errorf("there should be 1 configmap created for the gateway config of the workspace but there were: %d", len(cms.Items)) + } + + var workspaceCfg *corev1.ConfigMap + + for _, cfg := range cms.Items { + if cfg.Name == "wsid" { + workspaceCfg = &cfg + } + } + + if workspaceCfg == nil { + t.Fatalf("traefik configuration for the workspace not found") + } + + traefikWorkspaceConfig := workspaceCfg.Data["wsid.yml"] + + if len(traefikWorkspaceConfig) == 0 { + t.Fatal("No traefik config file found in the workspace config configmap") + } + + workspaceConfig := traefikConfig{} + if err := yaml.Unmarshal([]byte(traefikWorkspaceConfig), &workspaceConfig); err != nil { + t.Fatal(err) + } + + if len(workspaceConfig.HTTP.Routers) != 1 { + 
t.Fatalf("Expected exactly one traefik router but got %d", len(workspaceConfig.HTTP.Routers)) + } + + if _, ok := workspaceConfig.HTTP.Routers["wsid-m1-9999"]; !ok { + t.Fatal("traefik config doesn't contain expected workspace configuration") + } + }) +} + +func TestCreateSubDomainObjects(t *testing.T) { + testCommon := func(infra infrastructure.Type) solvers.RoutingObjects { + infrastructure.InitializeForTesting(infra) + + cl, _, objs := getSpecObjects(t, subdomainDevWorkspaceRouting()) + + t.Run("noPodAdditions", func(t *testing.T) { + if objs.PodAdditions != nil { + t.Error() + } + }) + + for i := range objs.Services { + t.Run(fmt.Sprintf("service-%d", i), func(t *testing.T) { + svc := &objs.Services[i] + if svc.Annotations[defaults.ConfigAnnotationCheManagerName] != "che" { + t.Errorf("The name of the associated che manager should have been recorded in the service annotation") + } + + if svc.Annotations[defaults.ConfigAnnotationCheManagerNamespace] != "ns" { + t.Errorf("The namespace of the associated che manager should have been recorded in the service annotation") + } + + if svc.Labels[constants.DevWorkspaceIDLabel] != "wsid" { + t.Errorf("The workspace ID should be recorded in the service labels") + } + }) + } + + t.Run("noWorkspaceTraefikConfig", func(t *testing.T) { + cms := &corev1.ConfigMapList{} + cl.List(context.TODO(), cms) + + if len(cms.Items) != 0 { + t.Errorf("there should be 0 configmaps created but there were: %d", len(cms.Items)) + } + }) + + return objs + } + + t.Run("expectedIngresses", func(t *testing.T) { + objs := testCommon(infrastructure.Kubernetes) + if len(objs.Ingresses) != 1 { + t.Error() + } + if objs.Ingresses[0].Spec.Rules[0].Host != "wsid-1.down.on.earth" { + t.Error() + } + }) + + t.Run("expectedRoutes", func(t *testing.T) { + objs := testCommon(infrastructure.OpenShiftv4) + if len(objs.Routes) != 1 { + t.Error() + } + if objs.Routes[0].Spec.Host != "wsid-1.down.on.earth" { + t.Error() + } + }) +} + +func 
TestReportRelocatableExposedEndpoints(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + routing := relocatableDevWorkspaceRouting() + _, solver, objs := getSpecObjects(t, routing) + + exposed, ready, err := solver.GetExposedEndpoints(routing.Spec.Endpoints, objs) + if err != nil { + t.Fatal(err) + } + + if !ready { + t.Errorf("The exposed endpoints should have been ready.") + } + + if len(exposed) != 1 { + t.Errorf("There should have been 1 exposed endpoints but found %d", len(exposed)) + } + + m1, ok := exposed["m1"] + if !ok { + t.Errorf("The exposed endpoints should have been defined on the m1 component.") + } + + if len(m1) != 3 { + t.Fatalf("There should have been 3 endpoints for m1.") + } + + e1 := m1[0] + if e1.Name != "e1" { + t.Errorf("The first endpoint should have been e1 but is %s", e1.Name) + } + if e1.Url != "https://over.the.rainbow/wsid/m1/9999/1/" { + t.Errorf("The e1 endpoint should have the following URL: '%s' but has '%s'.", "https://over.the.rainbow/wsid/m1/9999/1/", e1.Url) + } + + e2 := m1[1] + if e2.Name != "e2" { + t.Errorf("The second endpoint should have been e2 but is %s", e2.Name) + } + if e2.Url != "https://over.the.rainbow/wsid/m1/9999/2.js" { + t.Errorf("The e2 endpoint should have the following URL: '%s' but has '%s'.", "https://over.the.rainbow/wsid/m1/9999/2.js", e2.Url) + } + + e3 := m1[2] + if e3.Name != "e3" { + t.Errorf("The third endpoint should have been e3 but is %s", e3.Name) + } + if e3.Url != "https://over.the.rainbow/wsid/m1/9999/" { + t.Errorf("The e3 endpoint should have the following URL: '%s' but has '%s'.", "https://over.the.rainbow/wsid/m1/9999/", e3.Url) + } +} + +func TestReportSubdomainExposedEndpoints(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + routing := subdomainDevWorkspaceRouting() + _, solver, objs := getSpecObjects(t, routing) + + exposed, ready, err := solver.GetExposedEndpoints(routing.Spec.Endpoints, objs) + if err != nil { +
t.Fatal(err) + } + + if !ready { + t.Errorf("The exposed endpoints should have been ready.") + } + + if len(exposed) != 1 { + t.Errorf("There should have been 1 exposed endpoints but found %d", len(exposed)) + } + + m1, ok := exposed["m1"] + if !ok { + t.Errorf("The exposed endpoints should have been defined on the m1 component.") + } + + if len(m1) != 3 { + t.Fatalf("There should have been 3 endpoints for m1.") + } + + e1 := m1[0] + if e1.Name != "e1" { + t.Errorf("The first endpoint should have been e1 but is %s", e1.Name) + } + if e1.Url != "https://wsid-1.down.on.earth/1/" { + t.Errorf("The e1 endpoint should have the following URL: '%s' but has '%s'.", "https://wsid-1.down.on.earth/1/", e1.Url) + } + + e2 := m1[1] + if e2.Name != "e2" { + t.Errorf("The second endpoint should have been e2 but is %s", e2.Name) + } + if e2.Url != "https://wsid-1.down.on.earth/2.js" { + t.Errorf("The e2 endpoint should have the following URL: '%s' but has '%s'.", "https://wsid-1.down.on.earth/2.js", e2.Url) + } + + e3 := m1[2] + if e3.Name != "e3" { + t.Errorf("The third endpoint should have been e3 but is %s", e3.Name) + } + if e3.Url != "http://wsid-1.down.on.earth/" { + t.Errorf("The e3 endpoint should have the following URL: '%s' but has '%s'.", "http://wsid-1.down.on.earth/", e3.Url) + } +} + +func TestFinalize(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + routing := relocatableDevWorkspaceRouting() + cl, slv, _ := getSpecObjects(t, routing) + + // the create test checks that during the above call, the solver created the traefik configmap + // for the devworkspace + + // now, let the solver finalize the routing + if err := slv.Finalize(routing); err != nil { + t.Fatal(err) + } + + cms := &corev1.ConfigMapList{} + cl.List(context.TODO(), cms) + + if len(cms.Items) != 0 { + t.Fatalf("There should be just 0 configmaps after routing finalization, but there were %d found", len(cms.Items)) + } +} + +func 
TestEndpointsAlwaysOnSecureProtocolsWhenExposedThroughGateway(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + routing := relocatableDevWorkspaceRouting() + _, slv, objs := getSpecObjects(t, routing) + + exposed, ready, err := slv.GetExposedEndpoints(routing.Spec.Endpoints, objs) + if err != nil { + t.Fatal(err) + } + + if !ready { + t.Errorf("The exposed endpoints should be considered ready.") + } + + for _, endpoints := range exposed { + for _, endpoint := range endpoints { + if !strings.HasPrefix(endpoint.Url, "https://") { + t.Errorf("The endpoint %s should be exposed on https.", endpoint.Url) + } + } + } +} + +func TestUsesIngressAnnotationsForWorkspaceEndpointIngresses(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + + mgr := &v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "che", + Namespace: "ns", + Finalizers: []string{controller.FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "over.the.rainbow", + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "down.on.earth", + }, + K8s: v2alpha1.CheClusterSpecK8s{ + IngressAnnotations: map[string]string{ + "a": "b", + }, + }, + }, + } + + _, _, objs := getSpecObjectsForManager(t, mgr, subdomainDevWorkspaceRouting()) + + if len(objs.Ingresses) != 1 { + t.Fatalf("Unexpected number of generated ingresses: %d", len(objs.Ingresses)) + } + + ingress := objs.Ingresses[0] + if len(ingress.Annotations) != 3 { + // 3 annotations - a => b, endpoint-name and component-name + t.Fatalf("Unexpected number of annotations on the generated ingress: %d", len(ingress.Annotations)) + } + + if ingress.Annotations["a"] != "b" { + t.Errorf("Unexpected value of the custom endpoint ingress annotation") + } +} + +func TestUsesCustomCertificateForWorkspaceEndpointIngresses(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + + mgr := &v2alpha1.CheCluster{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "che", + Namespace: "ns", + Finalizers: []string{controller.FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "beyond.comprehension", + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "almost.trivial", + TlsSecretName: "tlsSecret", + }, + }, + } + + _, _, objs := getSpecObjectsForManager(t, mgr, subdomainDevWorkspaceRouting(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tlsSecret", + Namespace: "ns", + }, + Data: map[string][]byte{ + "tls.key": []byte("asdf"), + "tls.crt": []byte("qwer"), + }, + }) + + if len(objs.Ingresses) != 1 { + t.Fatalf("Unexpected number of generated ingresses: %d", len(objs.Ingresses)) + } + + ingress := objs.Ingresses[0] + + if len(ingress.Spec.TLS) != 1 { + t.Fatalf("Unexpected number of TLS records on the ingress: %d", len(ingress.Spec.TLS)) + } + + if ingress.Spec.TLS[0].SecretName != "wsid-endpoints" { + t.Errorf("Unexpected name of the TLS secret on the ingress: %s", ingress.Spec.TLS[0].SecretName) + } + + if len(ingress.Spec.TLS[0].Hosts) != 1 { + t.Fatalf("Unexpected number of host records on the TLS spec: %d", len(ingress.Spec.TLS[0].Hosts)) + } + + if ingress.Spec.TLS[0].Hosts[0] != "wsid-1.almost.trivial" { + t.Errorf("Unexpected host name of the TLS spec: %s", ingress.Spec.TLS[0].Hosts[0]) + } +} + +func TestUsesCustomCertificateForWorkspaceEndpointRoutes(t *testing.T) { + infrastructure.InitializeForTesting(infrastructure.OpenShiftv4) + + mgr := &v2alpha1.CheCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "che", + Namespace: "ns", + Finalizers: []string{controller.FinalizerName}, + }, + Spec: v2alpha1.CheClusterSpec{ + Gateway: v2alpha1.CheGatewaySpec{ + Host: "beyond.comprehension", + }, + WorkspaceDomainEndpoints: v2alpha1.WorkspaceDomainEndpoints{ + BaseDomain: "almost.trivial", + TlsSecretName: "tlsSecret", + }, + }, + } + + _, _, objs := getSpecObjectsForManager(t, mgr, 
subdomainDevWorkspaceRouting(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tlsSecret", + Namespace: "ns", + }, + Data: map[string][]byte{ + "tls.key": []byte("asdf"), + "tls.crt": []byte("qwer"), + }, + }) + + if len(objs.Routes) != 1 { + t.Fatalf("Unexpected number of generated routes: %d", len(objs.Routes)) + } + + route := objs.Routes[0] + + if route.Spec.TLS.Certificate != "qwer" { + t.Errorf("Unexpected name of the TLS certificate on the route: %s", route.Spec.TLS.Certificate) + } + + if route.Spec.TLS.Key != "asdf" { + t.Errorf("Unexpected key of TLS spec: %s", route.Spec.TLS.Key) + } +} + +func asV1(v2Obj *v2alpha1.CheCluster) *v1.CheCluster { + return org.AsV1(v2Obj) +} diff --git a/controllers/devworkspace/solver/doc.go b/controllers/devworkspace/solver/doc.go new file mode 100644 index 000000000..48489b95a --- /dev/null +++ b/controllers/devworkspace/solver/doc.go @@ -0,0 +1,5 @@ +// Package solver contains the implementation of the "devworkspace routing solver" which provides che-specific +// logic to the otherwise generic dev workspace routing controller. +// The devworkspace routing controller needs to be provided with a "solver getter" in its configuration prior +// to starting the reconciliation loop. See `CheRouterGetter`. +package solver diff --git a/controllers/devworkspace/solver/endpoint_exposer.go b/controllers/devworkspace/solver/endpoint_exposer.go new file mode 100644 index 000000000..e15e5b416 --- /dev/null +++ b/controllers/devworkspace/solver/endpoint_exposer.go @@ -0,0 +1,239 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// +package solver + +import ( + "context" + "fmt" + + dwo "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/eclipse-che/che-operator/api/v2alpha1" + "github.com/eclipse-che/che-operator/controllers/devworkspace/defaults" + routev1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type IngressExposer struct { + devWorkspaceID string + baseDomain string + ingressAnnotations map[string]string + tlsSecretName string +} + +type RouteExposer struct { + devWorkspaceID string + baseDomain string + tlsSecretKey string + tlsSecretCertificate string +} + +type EndpointInfo struct { + order int + componentName string + endpointName string + port int32 + scheme string + service *corev1.Service +} + +// This method is used compose the object names (both Kubernetes objects and "objects" within Traefik configuration) +// representing object endpoints. 
+func getEndpointExposingObjectName(componentName string, workspaceID string, port int32, endpointName string) string { + if endpointName == "" { + return fmt.Sprintf("%s-%s-%d", workspaceID, componentName, port) + } + return fmt.Sprintf("%s-%s-%d-%s", workspaceID, componentName, port, endpointName) +} + +func (e *RouteExposer) initFrom(ctx context.Context, cl client.Client, cluster *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting) error { + e.baseDomain = cluster.Status.WorkspaceBaseDomain + e.devWorkspaceID = routing.Spec.DevWorkspaceId + + if cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName != "" { + secret := &corev1.Secret{} + err := cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.TlsSecretName, Namespace: cluster.Namespace}, secret) + if err != nil { + return err + } + + e.tlsSecretKey = string(secret.Data["tls.key"]) + e.tlsSecretCertificate = string(secret.Data["tls.crt"]) + } + + return nil +} + +func (e *IngressExposer) initFrom(ctx context.Context, cl client.Client, cluster *v2alpha1.CheCluster, routing *dwo.DevWorkspaceRouting, ingressAnnotations map[string]string) error { + e.baseDomain = cluster.Status.WorkspaceBaseDomain + e.devWorkspaceID = routing.Spec.DevWorkspaceId + e.ingressAnnotations = ingressAnnotations + + if cluster.Spec.WorkspaceDomainEndpoints.TlsSecretName != "" { + tlsSecretName := routing.Spec.DevWorkspaceId + "-endpoints" + e.tlsSecretName = tlsSecretName + + secret := &corev1.Secret{} + + // check that there is no secret with the anticipated name yet + err := cl.Get(ctx, client.ObjectKey{Name: tlsSecretName, Namespace: routing.Namespace}, secret) + if errors.IsNotFound(err) { + secret = &corev1.Secret{} + err = cl.Get(ctx, client.ObjectKey{Name: cluster.Spec.TlsSecretName, Namespace: cluster.Namespace}, secret) + if err != nil { + return err + } + + yes := true + + newSecret := &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: tlsSecretName, + Namespace: routing.Namespace, + OwnerReferences: []v1.OwnerReference{ + { + 
Name: routing.Name, + Kind: routing.Kind, + APIVersion: routing.APIVersion, + UID: routing.UID, + Controller: &yes, + BlockOwnerDeletion: &yes, + }, + }, + }, + Type: secret.Type, + Data: secret.Data, + } + + return cl.Create(ctx, newSecret) + } + } + + return nil +} + +func (e *RouteExposer) getRouteForService(endpoint *EndpointInfo) routev1.Route { + targetEndpoint := intstr.FromInt(int(endpoint.port)) + route := routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: getEndpointExposingObjectName(endpoint.componentName, e.devWorkspaceID, endpoint.port, endpoint.endpointName), + Namespace: endpoint.service.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: e.devWorkspaceID, + }, + Annotations: routeAnnotations(endpoint.componentName, endpoint.endpointName), + OwnerReferences: endpoint.service.OwnerReferences, + }, + Spec: routev1.RouteSpec{ + Host: hostName(endpoint.order, e.devWorkspaceID, e.baseDomain), + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: endpoint.service.Name, + }, + Port: &routev1.RoutePort{ + TargetPort: targetEndpoint, + }, + }, + } + + if isSecureScheme(endpoint.scheme) { + route.Spec.TLS = &routev1.TLSConfig{ + InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect, + Termination: routev1.TLSTerminationEdge, + } + + if e.tlsSecretKey != "" { + route.Spec.TLS.Key = e.tlsSecretKey + route.Spec.TLS.Certificate = e.tlsSecretCertificate + } + } + + return route +} + +func (e *IngressExposer) getIngressForService(endpoint *EndpointInfo) v1beta1.Ingress { + targetEndpoint := intstr.FromInt(int(endpoint.port)) + hostname := hostName(endpoint.order, e.devWorkspaceID, e.baseDomain) + ingressPathType := v1beta1.PathTypeImplementationSpecific + + ingress := v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: getEndpointExposingObjectName(endpoint.componentName, e.devWorkspaceID, endpoint.port, endpoint.endpointName), + Namespace: endpoint.service.Namespace, + Labels: map[string]string{ + 
constants.DevWorkspaceIDLabel: e.devWorkspaceID, + }, + Annotations: finalizeIngressAnnotations(e.ingressAnnotations, endpoint.componentName, endpoint.endpointName), + OwnerReferences: endpoint.service.OwnerReferences, + }, + Spec: v1beta1.IngressSpec{ + Rules: []v1beta1.IngressRule{ + { + Host: hostname, + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + { + Backend: v1beta1.IngressBackend{ + ServiceName: endpoint.service.Name, + ServicePort: targetEndpoint, + }, + PathType: &ingressPathType, + Path: "/", + }, + }, + }, + }, + }, + }, + }, + } + + if isSecureScheme(endpoint.scheme) && e.tlsSecretName != "" { + ingress.Spec.TLS = []v1beta1.IngressTLS{ + { + Hosts: []string{hostname}, + SecretName: e.tlsSecretName, + }, + } + } + + return ingress +} + +func hostName(order int, workspaceID string, baseDomain string) string { + return fmt.Sprintf("%s-%d.%s", workspaceID, order+1, baseDomain) +} + +func routeAnnotations(machineName string, endpointName string) map[string]string { + return map[string]string{ + defaults.ConfigAnnotationEndpointName: endpointName, + defaults.ConfigAnnotationComponentName: machineName, + } +} + +func finalizeIngressAnnotations(ingressAnnotations map[string]string, machineName string, endpointName string) map[string]string { + annos := map[string]string{} + for k, v := range ingressAnnotations { + annos[k] = v + } + annos[defaults.ConfigAnnotationEndpointName] = endpointName + annos[defaults.ConfigAnnotationComponentName] = machineName + + return annos +} diff --git a/controllers/devworkspace/solver/solver.go b/controllers/devworkspace/solver/solver.go new file mode 100644 index 000000000..8dcbca9c6 --- /dev/null +++ b/controllers/devworkspace/solver/solver.go @@ -0,0 +1,199 @@ +// +// Copyright (c) 2019-2020 Red Hat, Inc. 
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package solver + +import ( + "fmt" + "time" + + "github.com/devfile/devworkspace-operator/pkg/constants" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers" + "github.com/eclipse-che/che-operator/api/v2alpha1" + controller "github.com/eclipse-che/che-operator/controllers/devworkspace" + "github.com/eclipse-che/che-operator/controllers/devworkspace/defaults" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var ( + logger = ctrl.Log.WithName("solver") +) + +// CheRoutingSolver is a struct representing the routing solver for Che specific routing of devworkspaces +type CheRoutingSolver struct { + client client.Client + scheme *runtime.Scheme +} + +// Magic to ensure we get compile time error right here if our struct doesn't support the interface. 
+var _ solvers.RoutingSolverGetter = (*CheRouterGetter)(nil) +var _ solvers.RoutingSolver = (*CheRoutingSolver)(nil) + +// CheRouterGetter negotiates the solver with the calling code +type CheRouterGetter struct { + scheme *runtime.Scheme +} + +// Getter creates a new CheRouterGetter +func Getter(scheme *runtime.Scheme) *CheRouterGetter { + return &CheRouterGetter{ + scheme: scheme, + } +} + +func (g *CheRouterGetter) HasSolver(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool { + return isSupported(routingClass) +} + +func (g *CheRouterGetter) GetSolver(client client.Client, routingClass controllerv1alpha1.DevWorkspaceRoutingClass) (solver solvers.RoutingSolver, err error) { + if !isSupported(routingClass) { + return nil, solvers.RoutingNotSupported + } + return &CheRoutingSolver{client: client, scheme: g.scheme}, nil +} + +func (g *CheRouterGetter) SetupControllerManager(mgr *builder.Builder) error { + + // We want to watch configmaps and re-map the reconcile on the devworkspace routing, if possible + // This way we can react on changes of the gateway configmap changes by re-reconciling the corresponding + // devworkspace routing and thus keeping the devworkspace routing in a functional state + // TODO is this going to be performant enough in a big cluster with very many configmaps? 
+ mgr.Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(func(mo handler.MapObject) []reconcile.Request { + applicable, key := isGatewayWorkspaceConfig(mo.Meta) + + if applicable { + // cool, we can trigger the reconcile of the routing so that we can update the configmap that has just changed under our hands + return []reconcile.Request{ + { + NamespacedName: key, + }, + } + } else { + return []reconcile.Request{} + } + })}) + + return nil +} + +func isGatewayWorkspaceConfig(obj metav1.Object) (bool, types.NamespacedName) { + workspaceID := obj.GetLabels()[constants.DevWorkspaceIDLabel] + objectName := obj.GetName() + + // bail out quickly if we're not dealing with a configmap with an expected name + if objectName != defaults.GetGatewayWorkpaceConfigMapName(workspaceID) { + return false, types.NamespacedName{} + } + + routingName := obj.GetAnnotations()[defaults.ConfigAnnotationDevWorkspaceRoutingName] + routingNamespace := obj.GetAnnotations()[defaults.ConfigAnnotationDevWorkspaceRoutingNamespace] + + // if there is no annotation for the routing, we're out of luck.. 
this should not happen though + if routingName == "" { + return false, types.NamespacedName{} + } + + // cool, we found a configmap belonging to a concrete devworkspace routing + return true, types.NamespacedName{Name: routingName, Namespace: routingNamespace} +} + +func (c *CheRoutingSolver) FinalizerRequired(routing *controllerv1alpha1.DevWorkspaceRouting) bool { + return true +} + +func (c *CheRoutingSolver) Finalize(routing *controllerv1alpha1.DevWorkspaceRouting) error { + cheManager, err := cheManagerOfRouting(routing) + if err != nil { + return err + } + + return c.cheRoutingFinalize(cheManager, routing) +} + +// GetSpecObjects constructs cluster routing objects which should be applied on the cluster +func (c *CheRoutingSolver) GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta solvers.DevWorkspaceMetadata) (solvers.RoutingObjects, error) { + cheManager, err := cheManagerOfRouting(routing) + if err != nil { + return solvers.RoutingObjects{}, err + } + + return c.cheSpecObjects(cheManager, routing, workspaceMeta) +} + +// GetExposedEndpoints retreives the URL for each endpoint in a devfile spec from a set of RoutingObjects. +// Returns is a map from component ids (as defined in the devfile) to the list of endpoints for that component +// Return value "ready" specifies if all endpoints are resolved on the cluster; if false it is necessary to retry, as +// URLs will be undefined. 
+func (c *CheRoutingSolver) GetExposedEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, routingObj solvers.RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) { + if len(routingObj.Services) == 0 { + return map[string]controllerv1alpha1.ExposedEndpointList{}, true, nil + } + + managerName := routingObj.Services[0].Annotations[defaults.ConfigAnnotationCheManagerName] + managerNamespace := routingObj.Services[0].Annotations[defaults.ConfigAnnotationCheManagerNamespace] + workspaceID := routingObj.Services[0].Labels[constants.DevWorkspaceIDLabel] + + manager, err := findCheManager(client.ObjectKey{Name: managerName, Namespace: managerNamespace}) + if err != nil { + return nil, false, err + } + + return c.cheExposedEndpoints(manager, workspaceID, endpoints, routingObj) +} + +func isSupported(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool { + return routingClass == "che" +} + +func cheManagerOfRouting(routing *controllerv1alpha1.DevWorkspaceRouting) (*v2alpha1.CheCluster, error) { + cheName := routing.Annotations[defaults.ConfigAnnotationCheManagerName] + cheNamespace := routing.Annotations[defaults.ConfigAnnotationCheManagerNamespace] + + return findCheManager(client.ObjectKey{Name: cheName, Namespace: cheNamespace}) +} + +func findCheManager(cheManagerKey client.ObjectKey) (*v2alpha1.CheCluster, error) { + managers := controller.GetCurrentCheClusterInstances() + if len(managers) == 0 { + // the CheManager has not been reconciled yet, so let's wait a bit + return &v2alpha1.CheCluster{}, &solvers.RoutingNotReady{Retry: 1 * time.Second} + } + + if len(cheManagerKey.Name) == 0 { + if len(managers) > 1 { + return &v2alpha1.CheCluster{}, &solvers.RoutingInvalid{Reason: fmt.Sprintf("the routing does not specify any Che manager in its configuration but there are %d Che managers in the cluster", len(managers))} + } + for _, m := range managers { + return &m, nil + } + + } + + if m, ok := 
managers[cheManagerKey]; ok { + return &m, nil + } + + logger.Info("Routing requires a non-existing che manager. Retrying in 10 seconds.", "key", cheManagerKey) + + return &v2alpha1.CheCluster{}, &solvers.RoutingNotReady{Retry: 10 * time.Second} +} diff --git a/controllers/devworkspace/solver/traefik_config.go b/controllers/devworkspace/solver/traefik_config.go new file mode 100644 index 000000000..3e0394888 --- /dev/null +++ b/controllers/devworkspace/solver/traefik_config.go @@ -0,0 +1,39 @@ +package solver + +// A representation of the Traefik config as we need it. This is in no way complete but can be used for the purposes we need it for. +type traefikConfig struct { + HTTP traefikConfigHTTP `json:"http"` +} + +type traefikConfigHTTP struct { + Routers map[string]traefikConfigRouter `json:"routers"` + Services map[string]traefikConfigService `json:"services"` + Middlewares map[string]traefikConfigMiddleware `json:"middlewares"` +} + +type traefikConfigRouter struct { + Rule string `json:"rule"` + Service string `json:"service"` + Middlewares []string `json:"middlewares"` + Priority int `json:"priority"` +} + +type traefikConfigService struct { + LoadBalancer traefikConfigLoadbalancer `json:"loadBalancer"` +} + +type traefikConfigMiddleware struct { + StripPrefix traefikConfigStripPrefix `json:"stripPrefix"` +} + +type traefikConfigLoadbalancer struct { + Servers []traefikConfigLoadbalancerServer `json:"servers"` +} + +type traefikConfigLoadbalancerServer struct { + URL string `json:"url"` +} + +type traefikConfigStripPrefix struct { + Prefixes []string `json:"prefixes"` +} diff --git a/controllers/devworkspace/sync/sync.go b/controllers/devworkspace/sync/sync.go new file mode 100644 index 000000000..551551541 --- /dev/null +++ b/controllers/devworkspace/sync/sync.go @@ -0,0 +1,208 @@ +// +// Copyright (c) 2019-2020 Red Hat, Inc. 
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package sync + +import ( + "context" + "fmt" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +var ( + log = ctrl.Log.WithName("sync") +) + +// Syncer synchronized K8s objects with the cluster +type Syncer struct { + client client.Client + scheme *runtime.Scheme +} + +func New(client client.Client, scheme *runtime.Scheme) Syncer { + return Syncer{client: client, scheme: scheme} +} + +// Sync syncs the blueprint to the cluster in a generic (as much as Go allows) manner. +// Returns true if the object was created or updated, false if there was no change detected. +func (s *Syncer) Sync(ctx context.Context, owner metav1.Object, blueprint metav1.Object, diffOpts cmp.Option) (bool, runtime.Object, error) { + blueprintObject, ok := blueprint.(runtime.Object) + if !ok { + return false, nil, fmt.Errorf("object %T is not a runtime.Object. 
Cannot sync it", blueprint) + } + + key := client.ObjectKey{Name: blueprint.GetName(), Namespace: blueprint.GetNamespace()} + + actual := blueprintObject.DeepCopyObject() + + if getErr := s.client.Get(context.TODO(), key, actual); getErr != nil { + if statusErr, ok := getErr.(*errors.StatusError); !ok || statusErr.Status().Reason != metav1.StatusReasonNotFound { + return false, nil, getErr + } + actual = nil + } + + if actual == nil { + actual, err := s.create(ctx, owner, key, blueprint) + if err != nil { + return false, actual, err + } + + return true, actual, nil + } + + return s.update(ctx, owner, actual, blueprint, diffOpts) +} + +// Delete deletes the supplied object from the cluster. +func (s *Syncer) Delete(ctx context.Context, object metav1.Object) error { + key := client.ObjectKey{Name: object.GetName(), Namespace: object.GetNamespace()} + + var err error + ro, ok := object.(runtime.Object) + if !ok { + return fmt.Errorf("Could not use the supplied object as kubernetes runtime object. That's unexpected: %s", object) + } + + if err = s.client.Get(ctx, key, ro); err == nil { + err = s.client.Delete(ctx, ro) + } + + if err != nil && !errors.IsNotFound(err) { + return err + } + + return nil +} + +func (s *Syncer) create(ctx context.Context, owner metav1.Object, key client.ObjectKey, blueprint metav1.Object) (runtime.Object, error) { + blueprintObject, ok := blueprint.(runtime.Object) + kind := blueprintObject.GetObjectKind().GroupVersionKind().Kind + if !ok { + return nil, fmt.Errorf("object %T is not a runtime.Object. 
Cannot sync it", blueprint) + } + + actual := blueprintObject.DeepCopyObject() + + log.Info("Creating a new object", "kind", kind, "name", blueprint.GetName(), "namespace", blueprint.GetNamespace()) + obj, err := s.setOwnerReferenceAndConvertToRuntime(owner, blueprint) + if err != nil { + return nil, err + } + + err = s.client.Create(ctx, obj) + if err != nil { + if !errors.IsAlreadyExists(err) { + return nil, err + } + + // ok, we got an already-exists error. So let's try to load the object into "actual". + // if we fail this retry for whatever reason, just give up rather than retrying this in a loop... + // the reconciliation loop will lead us here again in the next round. + if err = s.client.Get(ctx, key, actual); err != nil { + return nil, err + } + } + + return actual, nil +} + +func (s *Syncer) update(ctx context.Context, owner metav1.Object, actual runtime.Object, blueprint metav1.Object, diffOpts cmp.Option) (bool, runtime.Object, error) { + actualMeta := actual.(metav1.Object) + + diff := cmp.Diff(actual, blueprint, diffOpts) + if len(diff) > 0 { + kind := actual.GetObjectKind().GroupVersionKind().Kind + log.Info("Updating existing object", "kind", kind, "name", actualMeta.GetName(), "namespace", actualMeta.GetNamespace()) + + // we need to handle labels and annotations specially in case the cluster admin has modified them. + // if the current object in the cluster has the same annos/labels, they get overwritten with what's + // in the blueprint. Any additional labels/annos on the object are kept though. 
+ targetLabels := map[string]string{} + targetAnnos := map[string]string{} + + for k, v := range actualMeta.GetAnnotations() { + targetAnnos[k] = v + } + for k, v := range actualMeta.GetLabels() { + targetLabels[k] = v + } + + for k, v := range blueprint.GetAnnotations() { + targetAnnos[k] = v + } + for k, v := range blueprint.GetLabels() { + targetLabels[k] = v + } + + blueprint.SetAnnotations(targetAnnos) + blueprint.SetLabels(targetLabels) + + if isUpdateUsingDeleteCreate(actual.GetObjectKind().GroupVersionKind().Kind) { + err := s.client.Delete(ctx, actual) + if err != nil { + return false, actual, err + } + + key := client.ObjectKey{Name: actualMeta.GetName(), Namespace: actualMeta.GetNamespace()} + obj, err := s.create(ctx, owner, key, blueprint) + return false, obj, err + } else { + obj, err := s.setOwnerReferenceAndConvertToRuntime(owner, blueprint) + if err != nil { + return false, actual, err + } + + // to be able to update, we need to set the resource version of the object that we know of + obj.(metav1.Object).SetResourceVersion(actualMeta.GetResourceVersion()) + + err = s.client.Update(ctx, obj) + if err != nil { + return false, obj, err + } + + return true, obj, nil + } + } + return false, actual, nil +} + +func isUpdateUsingDeleteCreate(kind string) bool { + // Routes are not able to update the host, so we just need to re-create them... + // ingresses and services have been identified to need this, too, for reasons that I don't know.. + return "Service" == kind || "Ingress" == kind || "Route" == kind +} + +func (s *Syncer) setOwnerReferenceAndConvertToRuntime(owner metav1.Object, obj metav1.Object) (runtime.Object, error) { + robj, ok := obj.(runtime.Object) + if !ok { + return nil, fmt.Errorf("object %T is not a runtime.Object. 
Cannot sync it", obj) + } + + if owner == nil { + return robj, nil + } + + err := controllerutil.SetControllerReference(owner, obj, s.scheme) + if err != nil { + return nil, err + } + + return robj, nil +} diff --git a/controllers/devworkspace/sync/sync_test.go b/controllers/devworkspace/sync/sync_test.go new file mode 100644 index 000000000..5e255b63c --- /dev/null +++ b/controllers/devworkspace/sync/sync_test.go @@ -0,0 +1,195 @@ +package sync + +import ( + "context" + "reflect" + "testing" + + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + scheme = runtime.NewScheme() +) + +func init() { + infrastructure.InitializeForTesting(infrastructure.Kubernetes) + corev1.AddToScheme(scheme) +} + +func TestSyncCreates(t *testing.T) { + + preexisting := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "preexisting", + Namespace: "default", + }, + } + + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new", + Namespace: "default", + }, + } + + cl := fake.NewFakeClientWithScheme(scheme, preexisting) + + syncer := Syncer{client: cl, scheme: scheme} + + syncer.Sync(context.TODO(), preexisting, new, cmp.Options{}) + + synced := &corev1.Pod{} + key := client.ObjectKey{Name: "new", Namespace: "default"} + + cl.Get(context.TODO(), key, synced) + + if synced.Name != "new" { + t.Error("The synced object should have the expected name") + } + + if len(synced.OwnerReferences) == 0 { + t.Fatal("There should have been an owner reference set") + } + + if synced.OwnerReferences[0].Name != "preexisting" { + t.Error("Unexpected owner reference") + } +} + +func TestSyncUpdates(t *testing.T) { + preexisting := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "preexisting", + Namespace: "default", + 
OwnerReferences: []metav1.OwnerReference{ + { + Name: "preexisting", + Kind: "Pod", + }, + }, + }, + } + + newOwner := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "newOwner", + Namespace: "default", + }, + } + + update := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "preexisting", + Namespace: "default", + Labels: map[string]string{ + "a": "b", + }, + }, + } + + cl := fake.NewFakeClientWithScheme(scheme, preexisting) + + syncer := Syncer{client: cl, scheme: scheme} + + syncer.Sync(context.TODO(), newOwner, update, cmp.Options{}) + + synced := &corev1.Pod{} + key := client.ObjectKey{Name: "preexisting", Namespace: "default"} + + cl.Get(context.TODO(), key, synced) + + if synced.Name != "preexisting" { + t.Error("The synced object should have the expected name") + } + + if len(synced.OwnerReferences) == 0 { + t.Fatal("There should have been an owner reference set") + } + + if synced.OwnerReferences[0].Name != "newOwner" { + t.Error("Unexpected owner reference") + } + + if len(synced.GetLabels()) == 0 { + t.Fatal("There should have been labels on the synced object") + } + + if synced.GetLabels()["a"] != "b" { + t.Error("Unexpected label") + } +} + +func TestSyncKeepsAdditionalAnnosAndLabels(t *testing.T) { + preexisting := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "preexisting", + Namespace: "default", + Labels: map[string]string{ + "a": "x", + "k": "v", + }, + Annotations: map[string]string{ + "a": "x", + "k": "v", + }, + }, + } + + owner := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "owner", + Namespace: "default", + }, + } + + update := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "preexisting", + Namespace: "default", + Labels: map[string]string{ + "a": "b", + "c": "d", + }, + Annotations: map[string]string{ + "a": "b", + "c": "d", + }, + }, + } + + cl := fake.NewFakeClientWithScheme(scheme, preexisting) + + syncer := Syncer{client: cl, scheme: scheme} + + syncer.Sync(context.TODO(), owner, update, cmp.Options{}) 
+ + synced := &corev1.Pod{} + key := client.ObjectKey{Name: "preexisting", Namespace: "default"} + + cl.Get(context.TODO(), key, synced) + + if synced.Name != "preexisting" { + t.Error("The synced object should have the expected name") + } + + expectedValues := map[string]string{ + "a": "b", + "k": "v", + "c": "d", + } + + if !reflect.DeepEqual(expectedValues, synced.Labels) { + t.Fatal("Unexpected labels on the synced object") + } + + if !reflect.DeepEqual(expectedValues, synced.Annotations) { + t.Fatal("Unexpected annotations on the synced object") + } +} diff --git a/go.mod b/go.mod index 58ed19888..63b49e9e7 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,8 @@ require ( github.com/Shopify/logrus-bugsnag v0.0.0-00010101000000-000000000000 // indirect github.com/bitly/go-simplejson v0.0.0-00010101000000-000000000000 // indirect github.com/che-incubator/kubernetes-image-puller-operator v0.0.0-20210428110012-14ef54b7dbf4 + github.com/devfile/api/v2 v2.0.0-20210713124824-03e023e7078b + github.com/devfile/devworkspace-operator v0.2.1-0.20210805190010-9c55f69c461d github.com/go-logr/logr v0.4.0 github.com/golang/mock v1.5.0 github.com/google/go-cmp v0.5.2 @@ -16,7 +18,7 @@ require ( github.com/operator-framework/operator-lifecycle-manager v0.18.1 github.com/prometheus/client_golang v1.11.0 // indirect github.com/sirupsen/logrus v1.7.0 - go.uber.org/zap v1.13.0 + go.uber.org/zap v1.16.0 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect k8s.io/api v0.21.1 diff --git a/go.sum b/go.sum index 0ea90446d..69b50ab23 100644 --- a/go.sum +++ b/go.sum @@ -51,14 +51,17 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/logrus-bugsnag 
v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2/go.mod h1:X0noFIik9YqfhGYBLEHg8LJKEwy7QIitLQuFMpKLcPk= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= github.com/aws/aws-sdk-go v0.0.0-20210122191723-2c7b39c8f2e2/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -143,6 +146,10 @@ github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVz github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/deislabs/oras v0.8.1/go.mod 
h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= github.com/denisenkom/go-mssqldb v0.0.0-20190204142019-df6d76eb9289/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= +github.com/devfile/api/v2 v2.0.0-20210713124824-03e023e7078b h1:N00ORHA5iamvPKpDFfSAkAczAaCBvK8l0EzAphsgFSI= +github.com/devfile/api/v2 v2.0.0-20210713124824-03e023e7078b/go.mod h1:QNzaIVQnCsYfXed+QZOn1uvEQFzyhvpi/uc3g/b2ws0= +github.com/devfile/devworkspace-operator v0.2.1-0.20210805190010-9c55f69c461d h1:m0AhacO7IrwysBlLWDunITEkxITciGaO5e6uMN0t1XQ= +github.com/devfile/devworkspace-operator v0.2.1-0.20210805190010-9c55f69c461d/go.mod h1:Rfz7VVnXRpM4dT7UgMwV8zp6qHCggi39mBrN+i69pRo= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -173,6 +180,7 @@ github.com/edsrzf/mmap-go v0.0.0-20181215214921-188cc3b666ba/go.mod h1:YO35OhQPt github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20200129102538-a2fa14558f9a/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.0.0-20200213201256-ba8e577f987f/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= @@ -196,10 +204,15 @@ github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc github.com/garyburd/redigo v1.6.0/go.mod 
h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20160323214708-72aab81a5dec/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= @@ -289,11 +302,14 @@ github.com/grpc-ecosystem/grpc-health-probe v0.3.2/go.mod h1:izVOQ4RWbjUR6lm4nn+ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -335,6 +351,7 @@ github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= @@ -352,6 +369,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 
h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -374,6 +392,7 @@ github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= +github.com/lucasjones/reggen v0.0.0-20200904144131-37ba4fa293bb/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20200218084223-8edcc4e51f39/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/maorfr/helm-plugin-utils v0.0.0-20181205064038-588190cb5e3b/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= @@ -406,6 +425,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.1 
h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 h1:cvy4lBOYN3gKfKj8Lzz5Q9TfviP+L7koMHY7SvkyTKs= github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= @@ -427,6 +447,7 @@ github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/neo4j-drivers/gobolt v1.7.4/go.mod h1:O9AUbip4Dgre+CD3p40dnMD4a4r52QBIfblg5k7CTbE= github.com/neo4j/neo4j-go-driver v1.7.4/go.mod h1:aPO0vVr+WnhEJne+FgFjfsjzAnssPFLucHgGZ76Zb/U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= @@ -487,6 +508,8 @@ github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULU github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= +github.com/redhat-cop/operator-utils v0.1.0 h1:K0/A5bQS+7cl2mMk6cFaTlmcf1/cNepp6C5digjmysM= +github.com/redhat-cop/operator-utils v0.1.0/go.mod h1:K9f0vBA2bBiDyg9bsGDUojdwdhwUvHKX5QW0B+brWgo= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -497,6 +520,7 @@ github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -534,6 +558,7 @@ github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmF github.com/vmware/govmomi v0.0.0-20201221180647-1ec59a7c0002/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -642,6 +667,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -776,6 +802,7 @@ gopkg.in/tomb.v1 v1.0.0-20161208151619-d5d1b5820637/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main.go b/main.go index a5c7bbd48..cff65242f 100644 --- a/main.go +++ b/main.go @@ -19,15 +19,21 @@ import ( "go.uber.org/zap/zapcore" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
+ "k8s.io/client-go/discovery" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" osruntime "runtime" + dwo_api "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + dwr "github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + "fmt" "github.com/go-logr/logr" @@ -39,6 +45,8 @@ import ( checontroller "github.com/eclipse-che/che-operator/controllers/che" backupcontroller "github.com/eclipse-che/che-operator/controllers/checlusterbackup" restorecontroller "github.com/eclipse-che/che-operator/controllers/checlusterrestore" + "github.com/eclipse-che/che-operator/controllers/devworkspace" + "github.com/eclipse-che/che-operator/controllers/devworkspace/solver" "github.com/eclipse-che/che-operator/pkg/deploy" "github.com/eclipse-che/che-operator/pkg/signal" "github.com/eclipse-che/che-operator/pkg/util" @@ -189,7 +197,11 @@ func main() { HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, LeaderElectionID: "e79b08a4.org.eclipse.che", - Namespace: watchNamespace, + // NOTE: We CANNOT limit the manager to a single namespace, because that would limit the + // devworkspace routing reconciler to a single namespace, which would make it totally unusable. + // Instead, if some controller wants to limit itself to single namespace, it can do it + // for example using an event filter, as checontroller does. + // Namespace: watchNamespace, // TODO try to use it instead of signal handler.... 
// GracefulShutdownTimeout: , }) @@ -198,13 +210,13 @@ func main() { os.Exit(1) } - cheReconciler, err := checontroller.NewReconciler(mgr) + cheReconciler, err := checontroller.NewReconciler(mgr, watchNamespace) if err != nil { setupLog.Error(err, "unable to create checluster reconciler") os.Exit(1) } - backupReconciler := backupcontroller.NewReconciler(mgr) - restoreReconciler := restorecontroller.NewReconciler(mgr) + backupReconciler := backupcontroller.NewReconciler(mgr, watchNamespace) + restoreReconciler := restorecontroller.NewReconciler(mgr, watchNamespace) if err = cheReconciler.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to set up controller", "controller", "CheCluster") @@ -219,6 +231,11 @@ func main() { os.Exit(1) } + if err = enableDevworkspaceSupport(mgr); err != nil { + setupLog.Error(err, "unable to initialize devworkspace support") + os.Exit(1) + } + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { @@ -238,3 +255,58 @@ func main() { os.Exit(1) } } + +func enableDevworkspaceSupport(mgr manager.Manager) error { + // DWO and DWCO use the infrastructure package for openshift detection. It needs to be initialized + // but only supports OpenShift v4 or Kubernetes. + if err := infrastructure.Initialize(); err != nil { + setupLog.Info("devworkspace cannot run on this infrastructure") + return nil + } + + // we install the devworkspace CheCluster reconciler even if dw is not supported so that it + // can write meaningful status messages into the CheCluster CRs. + dwChe := devworkspace.CheClusterReconciler{} + if err := dwChe.SetupWithManager(mgr); err != nil { + return err + } + + // we only enable Devworkspace support, if there is the controller.devfile.io resource group in the cluster + // we assume that if the group is there, then we have all the expected CRs there, too. 
+ + cl, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) + if err != nil { + return err + } + + groups, err := cl.ServerGroups() + if err != nil { + return err + } + + supported := false + for _, g := range groups.Groups { + if g.Name == "controller.devfile.io" { + supported = true + break + } + } + + if supported { + if err := dwo_api.AddToScheme(mgr.GetScheme()); err != nil { + return err + } + + routing := dwr.DevWorkspaceRoutingReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("DevWorkspaceRouting"), + Scheme: mgr.GetScheme(), + SolverGetter: solver.Getter(mgr.GetScheme()), + } + if err := routing.SetupWithManager(mgr); err != nil { + return err + } + } + + return nil +} diff --git a/make-release.sh b/make-release.sh index 95d410c6b..16850473d 100755 --- a/make-release.sh +++ b/make-release.sh @@ -28,7 +28,6 @@ init() { FORCE_UPDATE="" BUILDX_PLATFORMS="linux/amd64,linux/ppc64le" DEV_WORKSPACE_CONTROLLER_VERSION="main" - DEV_WORKSPACE_CHE_OPERATOR_VERSION="main" STABLE_CHANNELS=("stable-all-namespaces" "stable") if [[ $# -lt 1 ]]; then usage; exit; fi @@ -43,7 +42,6 @@ init() { '--check-resources') CHECK_RESOURCES=true; shift 0;; '--prepare-community-operators-update') PREPARE_COMMUNITY_OPERATORS_UPDATE=true; shift 0;; '--dev-workspace-controller-version') DEV_WORKSPACE_CONTROLLER_VERSION=$2; shift 1;; - '--dev-workspace-che-operator-version') DEV_WORKSPACE_CHE_OPERATOR_VERSION=$2; shift 1;; '--force') FORCE_UPDATE="--force"; shift 0;; '--help'|'-h') usage; exit;; esac @@ -133,10 +131,6 @@ if ! grep -q "value: quay.io/eclipse/che-dashboard:$RELEASE" $filename; then # use ${RELEASE} instead of master wget https://raw.githubusercontent.com/eclipse-che/che-server/${RELEASE}/assembly/assembly-wsmaster-war/src/main/webapp/WEB-INF/classes/che/che.properties -q -O /tmp/che.properties - if ! 
grep -q "value: quay.io/che-incubator/devworkspace-che-operator:$DEV_WORKSPACE_CHE_OPERATOR_VERSION" $filename; then - echo "[ERROR] Unable to find devworkspace che operator image with version ${DEV_WORKSPACE_CHE_OPERATOR_VERSION} in the $filename"; exit 1 - fi - plugin_broker_meta_image=$(cat /tmp/che.properties | grep che.workspace.plugin_broker.metadata.image | cut -d '=' -f2) if ! grep -q "value: $plugin_broker_meta_image" $filename; then echo "[ERROR] Unable to find plugin broker meta image '$plugin_broker_meta_image' in the $filename"; exit 1 @@ -171,7 +165,7 @@ releaseOperatorCode() { docker login quay.io -u "${QUAY_ECLIPSE_CHE_USERNAME}" -p "${QUAY_ECLIPSE_CHE_PASSWORD}" echo "[INFO] releaseOperatorCode :: Build operator image in platforms: $BUILDX_PLATFORMS" - docker buildx build --build-arg DEV_WORKSPACE_CONTROLLER_VERSION=${DEV_WORKSPACE_CONTROLLER_VERSION} --build-arg DEV_WORKSPACE_CHE_OPERATOR_VERSION=${DEV_WORKSPACE_CHE_OPERATOR_VERSION} --platform "$BUILDX_PLATFORMS" --push -t "quay.io/eclipse/che-operator:${RELEASE}" . + docker buildx build --build-arg DEV_WORKSPACE_CONTROLLER_VERSION=${DEV_WORKSPACE_CONTROLLER_VERSION} --platform "$BUILDX_PLATFORMS" --push -t "quay.io/eclipse/che-operator:${RELEASE}" . 
} replaceImagesTags() { @@ -180,7 +174,6 @@ replaceImagesTags() { lastDefaultCheServerImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_che_server\") | .value" "${OPERATOR_YAML}") lastDefaultDashboardImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_dashboard\") | .value" "${OPERATOR_YAML}") lastDefaultDevWorkspaceControllerImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_devworkspace_controller\") | .value" "${OPERATOR_YAML}") - lastDefaultDevWorkspaceCheOperatorImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_devworkspace_che_operator\") | .value" "${OPERATOR_YAML}") lastDefaultKeycloakImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_keycloak\") | .value" "${OPERATOR_YAML}") lastDefaultPluginRegistryImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_plugin_registry\") | .value" "${OPERATOR_YAML}") lastDefaultDevfileRegistryImage=$(yq -r ".spec.template.spec.containers[] | select(.name == \"che-operator\") | .env[] | select(.name == \"RELATED_IMAGE_devfile_registry\") | .value" "${OPERATOR_YAML}") @@ -188,7 +181,6 @@ replaceImagesTags() { CHE_SERVER_IMAGE_REALEASE=$(replaceTag "${lastDefaultCheServerImage}" "${RELEASE}") DASHBOARD_IMAGE_REALEASE=$(replaceTag "${lastDefaultDashboardImage}" "${RELEASE}") DEVWORKSPACE_CONTROLLER_IMAGE_RELEASE=$(replaceTag "${lastDefaultDevWorkspaceControllerImage}" "${DEV_WORKSPACE_CONTROLLER_VERSION}") - DEVWORKSPACE_CHE_OPERATOR_IMAGE_RELEASE=$(replaceTag "${lastDefaultDevWorkspaceCheOperatorImage}" "${DEV_WORKSPACE_CHE_OPERATOR_VERSION}") KEYCLOAK_IMAGE_RELEASE=$(replaceTag 
"${lastDefaultKeycloakImage}" "${RELEASE}") PLUGIN_REGISTRY_IMAGE_RELEASE=$(replaceTag "${lastDefaultPluginRegistryImage}" "${RELEASE}") DEVFILE_REGISTRY_IMAGE_RELEASE=$(replaceTag "${lastDefaultDevfileRegistryImage}" "${RELEASE}") @@ -203,11 +195,9 @@ replaceImagesTags() { yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_che_server\") | .value ) = \"${CHE_SERVER_IMAGE_REALEASE}\"" | \ yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_dashboard\") | .value ) = \"${DASHBOARD_IMAGE_REALEASE}\"" | \ yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_devworkspace_controller\") | .value ) = \"${DEVWORKSPACE_CONTROLLER_IMAGE_RELEASE}\"" | \ - yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_devworkspace_che_operator\") | .value ) = \"${DEVWORKSPACE_CHE_OPERATOR_IMAGE_RELEASE}\"" | \ yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_keycloak\") | .value ) = \"${KEYCLOAK_IMAGE_RELEASE}\"" | \ yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_plugin_registry\") | .value ) = \"${PLUGIN_REGISTRY_IMAGE_RELEASE}\"" | \ yq -ryY "( .spec.template.spec.containers[] | select(.name == \"che-operator\").env[] | select(.name == \"RELATED_IMAGE_devfile_registry\") | .value ) = \"${DEVFILE_REGISTRY_IMAGE_RELEASE}\"" | \ - yq -ryY "( .spec.template.spec.containers[] | select(.name == \"devworkspace-che-operator\") | .image ) = \"quay.io/che-incubator/devworkspace-che-operator:${DEV_WORKSPACE_CHE_OPERATOR_VERSION}\"" \ >> "${NEW_OPERATOR_YAML}" mv "${NEW_OPERATOR_YAML}" "${OPERATOR_YAML}" } @@ -230,7 +220,7 @@ releaseOlmFiles() { for channel in "${STABLE_CHANNELS[@]}" do cd 
$RELEASE_DIR/olm - . release-olm-files.sh --release-version $RELEASE --channel $channel --dev-workspace-controller-version $DEV_WORKSPACE_CONTROLLER_VERSION --dev-workspace-che-operator-version $DEV_WORKSPACE_CHE_OPERATOR_VERSION + . release-olm-files.sh --release-version $RELEASE --channel $channel --dev-workspace-controller-version $DEV_WORKSPACE_CONTROLLER_VERSION cd $RELEASE_DIR local openshift=$RELEASE_DIR/bundle/$channel/eclipse-che-preview-openshift/manifests diff --git a/olm/buildDigestMap.sh b/olm/buildDigestMap.sh index 5cda0f9c2..aa73c6458 100755 --- a/olm/buildDigestMap.sh +++ b/olm/buildDigestMap.sh @@ -61,9 +61,6 @@ setImagesFromDeploymentEnv setOperatorImage echo "${OPERATOR_IMAGE}" -setDevWorkspaceCheOperatorImage -echo ${DEVWORKSPACE_CHE_OPERATOR_IMAGE} - setPluginRegistryList echo "${PLUGIN_REGISTRY_LIST}" @@ -130,7 +127,6 @@ rm -Rf "${DIGEST_FILE}" touch "${DIGEST_FILE}" writeDigest "${OPERATOR_IMAGE}" "operator-image" -writeDigest "${DEVWORKSPACE_CHE_OPERATOR_IMAGE}" "devworkspace-che-operator-image" for image in ${REQUIRED_IMAGES}; do writeDigest "${image}" "required-image" diff --git a/olm/images.sh b/olm/images.sh index 55d9fd94b..57d873d5d 100755 --- a/olm/images.sh +++ b/olm/images.sh @@ -18,10 +18,6 @@ setOperatorImage() { OPERATOR_IMAGE=$(yq -r '.spec.install.spec.deployments[].spec.template.spec.containers[0].image' "${CSV}") } -setDevWorkspaceCheOperatorImage() { - DEVWORKSPACE_CHE_OPERATOR_IMAGE=$(yq -r '.spec.install.spec.deployments[].spec.template.spec.containers[1].image' "${CSV}") -} - setPluginRegistryList() { registry=$(yq -r '.spec.install.spec.deployments[].spec.template.spec.containers[].env[] | select(.name | test("RELATED_IMAGE_.*plugin_registry"; "g")) | .value' "${CSV}") setRegistryImages "${registry}" diff --git a/olm/release-olm-files.sh b/olm/release-olm-files.sh index 03c85a9fe..aeffed4d2 100755 --- a/olm/release-olm-files.sh +++ b/olm/release-olm-files.sh @@ -17,7 +17,6 @@ while [[ "$#" -gt 0 ]]; do 
'--release-version') RELEASE=$2; shift 1;; '--channel') CHANNEL=$2; shift 1;; '--dev-workspace-controller-version') DEV_WORKSPACE_CONTROLLER_VERSION=$2; shift 1;; - '--dev-workspace-che-operator-version') DEV_WORKSPACE_CHE_OPERATOR_VERSION=$2; shift 1;; esac shift 1 done @@ -82,7 +81,6 @@ if [[ -z "$RELEASE" ]] || [[ -z "$RELEASE" ]] || [[ -z "$RELEASE" ]]; then echo "One of the following required parameters is missing" echo "--release-version $RELEASE" echo "--dev-workspace-controller-version $DEV_WORKSPACE_CONTROLLER_VERSION" - echo "--dev-workspace-che-operator-version $DEV_WORKSPACE_CHE_OPERATOR_VERSION" exit 1 fi @@ -118,7 +116,6 @@ do -e 's/imagePullPolicy: *Always/imagePullPolicy: IfNotPresent/' \ -e 's/"cheImageTag": *"next"/"cheImageTag": ""/' \ -e 's|quay.io/eclipse/che-dashboard:next|quay.io/eclipse/che-dashboard:'${RELEASE}'|' \ - -e 's|quay.io/che-incubator/devworkspace-che-operator:next|quay.io/che-incubator/devworkspace-che-operator:'${DEV_WORKSPACE_CHE_OPERATOR_VERSION}'|' \ -e 's|quay.io/devfile/devworkspace-controller:next|quay.io/devfile/devworkspace-controller:'${DEV_WORKSPACE_CONTROLLER_VERSION}'|' \ -e 's|"identityProviderImage": *"quay.io/eclipse/che-keycloak:next"|"identityProviderImage": ""|' \ -e 's|"devfileRegistryImage": *"quay.io/eclipse/che-devfile-registry:next"|"devfileRegistryImage": ""|' \ diff --git a/pkg/deploy/defaults.go b/pkg/deploy/defaults.go index ecfccb206..c38edbc33 100644 --- a/pkg/deploy/defaults.go +++ b/pkg/deploy/defaults.go @@ -30,7 +30,6 @@ var ( defaultCheServerImage string defaultCheVersion string defaultDashboardImage string - defaultDevworkspaceCheOperatorImage string defaultDevworkspaceControllerImage string defaultPluginRegistryImage string defaultDevfileRegistryImage string @@ -176,7 +175,6 @@ func InitDefaultsFromFile(defaultsPath string) { defaultCheVersion = util.GetDeploymentEnv(operatorDeployment, "CHE_VERSION") defaultCheServerImage = util.GetDeploymentEnv(operatorDeployment, 
util.GetArchitectureDependentEnv("RELATED_IMAGE_che_server")) defaultDashboardImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_dashboard")) - defaultDevworkspaceCheOperatorImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_che_operator")) defaultDevworkspaceControllerImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_controller")) defaultPluginRegistryImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_plugin_registry")) defaultDevfileRegistryImage = util.GetDeploymentEnv(operatorDeployment, util.GetArchitectureDependentEnv("RELATED_IMAGE_devfile_registry")) @@ -302,10 +300,6 @@ func DefaultDevworkspaceControllerImage(cr *orgv1.CheCluster) string { return patchDefaultImageName(cr, defaultDevworkspaceControllerImage) } -func DefaultDevworkspaceCheOperatorImage(cr *orgv1.CheCluster) string { - return patchDefaultImageName(cr, defaultDevworkspaceCheOperatorImage) -} - func DefaultKeycloakImage(cr *orgv1.CheCluster) string { return patchDefaultImageName(cr, defaultKeycloakImage) } @@ -457,7 +451,6 @@ func InitDefaultsFromEnv() { defaultCheVersion = getDefaultFromEnv("CHE_VERSION") defaultCheServerImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_che_server")) defaultDashboardImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_dashboard")) - defaultDevworkspaceCheOperatorImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_che_operator")) defaultDevworkspaceControllerImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_devworkspace_controller")) defaultPluginRegistryImage = getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_plugin_registry")) defaultDevfileRegistryImage = 
getDefaultFromEnv(util.GetArchitectureDependentEnv("RELATED_IMAGE_devfile_registry")) diff --git a/pkg/deploy/dev-workspace/dev_workspace.go b/pkg/deploy/dev-workspace/dev_workspace.go index 03f0819a8..0649776dd 100644 --- a/pkg/deploy/dev-workspace/dev_workspace.go +++ b/pkg/deploy/dev-workspace/dev_workspace.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io/ioutil" + "os" "strings" orgv1 "github.com/eclipse-che/che-operator/api/v1" @@ -46,17 +47,13 @@ var ( DevWorkspaceDeploymentName = "devworkspace-controller-manager" SubscriptionResourceName = "subscriptions" - CheManagerResourcename = "chemanagers" ClusterServiceVersionResourceName = "clusterserviceversions" DevWorkspaceCSVNameWithouVersion = "devworkspace-operator" - OpenshiftDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/openshift/objects" - OpenshiftDevWorkspaceCheTemplatesPath = "/tmp/devworkspace-che-operator/templates/deployment/openshift/objects" - KubernetesDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/kubernetes/objects" - KubernetesDevWorkspaceCheTemplatesPath = "/tmp/devworkspace-che-operator/templates/deployment/kubernetes/objects" + OpenshiftDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/openshift/objects" + KubernetesDevWorkspaceTemplatesPath = "/tmp/devworkspace-operator/templates/deployment/kubernetes/objects" - DevWorkspaceTemplates = devWorkspaceTemplatesPath() - DevWorkspaceCheTemplates = devWorkspaceCheTemplatesPath() + DevWorkspaceTemplates = devWorkspaceTemplatesPath() DevWorkspaceServiceAccountFile = DevWorkspaceTemplates + "/devworkspace-controller-serviceaccount.ServiceAccount.yaml" DevWorkspaceRoleFile = DevWorkspaceTemplates + "/devworkspace-controller-leader-election-role.Role.yaml" @@ -108,6 +105,13 @@ var ( syncDwConfigMap, syncDwDeployment, } + + // Exits the operator after successful fresh installation of the devworkspace. 
+ // Can be replaced with something less drastic (especially useful in tests) + afterInstall = func() { + logrus.Warn("Exitting the operator after DevWorkspace installation. DevWorkspace support will be initialized on the next start.") + os.Exit(1) + } ) func ReconcileDevWorkspace(deployContext *deploy.DeployContext) (bool, error) { @@ -150,14 +154,21 @@ func ReconcileDevWorkspace(deployContext *deploy.DeployContext) (bool, error) { } for _, syncItem := range syncItems { - done, err := syncItem(deployContext) + _, err := syncItem(deployContext) if !util.IsTestMode() { - if !done { + if err != nil { return false, err } } } + if !devWorkspaceWebhookExists && !util.IsTestMode() { + // the webhook did not exist in the cluster + // this means that we're installing devworkspace and therefore need to restart + // so that devworkspace support can initialize during the operator startup + afterInstall() + } + return true, nil } @@ -203,7 +214,7 @@ func checkWebTerminalSubscription(deployContext *deploy.DeployContext) error { return err } - return errors.New("A non matching version of the Dev Workspace operator is already installed") + return errors.New("a non matching version of the Dev Workspace operator is already installed") } func createDwNamespace(deployContext *deploy.DeployContext) (bool, error) { @@ -464,10 +475,3 @@ func devWorkspaceTemplatesPath() string { } return KubernetesDevWorkspaceTemplatesPath } - -func devWorkspaceCheTemplatesPath() string { - if util.IsOpenShift { - return OpenshiftDevWorkspaceCheTemplatesPath - } - return KubernetesDevWorkspaceCheTemplatesPath -} diff --git a/pkg/deploy/dev-workspace/dev_workspace_test.go b/pkg/deploy/dev-workspace/dev_workspace_test.go index b2ce5deab..29acc74e7 100644 --- a/pkg/deploy/dev-workspace/dev_workspace_test.go +++ b/pkg/deploy/dev-workspace/dev_workspace_test.go @@ -116,14 +116,6 @@ func TestReconcileDevWorkspace(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { deployContext := 
deploy.GetTestDeployContext(testCase.cheCluster, []runtime.Object{}) deployContext.ClusterAPI.Scheme.AddKnownTypes(operatorsv1alpha1.SchemeGroupVersion, &operatorsv1alpha1.Subscription{}) - deployContext.ClusterAPI.Scheme.AddKnownTypes(operatorsv1alpha1.SchemeGroupVersion, &operatorsv1alpha1.ClusterServiceVersion{}) - deployContext.ClusterAPI.DiscoveryClient.(*fakeDiscovery.FakeDiscovery).Fake.Resources = []*metav1.APIResourceList{ - { - APIResources: []metav1.APIResource{ - {Name: CheManagerResourcename}, - }, - }, - } util.IsOpenShift = testCase.IsOpenShift util.IsOpenShift4 = testCase.IsOpenShift4 @@ -183,7 +175,7 @@ func TestReconcileDevWorkspaceShouldThrowErrorIfWebTerminalSubscriptionExists(t util.IsOpenShift4 = true _, err := ReconcileDevWorkspace(deployContext) - if err == nil || err.Error() != "A non matching version of the Dev Workspace operator is already installed" { + if err == nil || err.Error() != "a non matching version of the Dev Workspace operator is already installed" { t.Fatalf("Error should be thrown") } } diff --git a/pkg/util/k8s_helpers.go b/pkg/util/k8s_helpers.go index 615647f3d..377abf0e8 100644 --- a/pkg/util/k8s_helpers.go +++ b/pkg/util/k8s_helpers.go @@ -26,6 +26,8 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/remotecommand" "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" ) type k8s struct { @@ -188,3 +190,20 @@ func (cl *k8s) IsResourceOperationPermitted(resourceAttr *authorizationv1.Resour return ssar.Status.Allowed, nil } + +func InNamespaceEventFilter(namespace string) predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(ce event.CreateEvent) bool { + return namespace == ce.Meta.GetNamespace() + }, + DeleteFunc: func(de event.DeleteEvent) bool { + return namespace == de.Meta.GetNamespace() + }, + UpdateFunc: func(ue event.UpdateEvent) bool { + return namespace == ue.MetaNew.GetNamespace() + }, + 
GenericFunc: func(ge event.GenericEvent) bool { + return namespace == ge.Meta.GetNamespace() + }, + } +} diff --git a/vendor/github.com/devfile/api/v2/LICENSE b/vendor/github.com/devfile/api/v2/LICENSE new file mode 100644 index 000000000..d3087e4c5 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/LICENSE @@ -0,0 +1,277 @@ +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. 
+ +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. + +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. 
The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. 
REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. 
+ +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. 
If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. 
The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. 
diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go new file mode 100644 index 000000000..c4c14d343 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go @@ -0,0 +1,182 @@ +package v1alpha2 + +import ( + attributes "github.com/devfile/api/v2/pkg/attributes" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// CommandType describes the type of command. +// Only one of the following command type may be specified. +// +kubebuilder:validation:Enum=Exec;Apply;Composite;Custom +type CommandType string + +const ( + ExecCommandType CommandType = "Exec" + ApplyCommandType CommandType = "Apply" + CompositeCommandType CommandType = "Composite" + CustomCommandType CommandType = "Custom" +) + +// CommandGroupKind describes the kind of command group. +// +kubebuilder:validation:Enum=build;run;test;debug +type CommandGroupKind string + +const ( + BuildCommandGroupKind CommandGroupKind = "build" + RunCommandGroupKind CommandGroupKind = "run" + TestCommandGroupKind CommandGroupKind = "test" + DebugCommandGroupKind CommandGroupKind = "debug" +) + +type CommandGroup struct { + // Kind of group the command is part of + Kind CommandGroupKind `json:"kind"` + + // +optional + // Identifies the default command for a given group kind + IsDefault bool `json:"isDefault,omitempty"` +} + +type BaseCommand struct { + // +optional + // Defines the group this command is part of + Group *CommandGroup `json:"group,omitempty"` +} + +type LabeledCommand struct { + BaseCommand `json:",inline"` + + // +optional + // Optional label that provides a label for this command + // to be used in Editor UI menus for example + Label string `json:"label,omitempty"` +} + +type Command struct { + // Mandatory identifier that allows referencing + // this command in composite commands, from + // a parent, or in events. 
+ // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Id string `json:"id"` + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + CommandUnion `json:",inline"` +} + +// +union +type CommandUnion struct { + // Type of devworkspace command + // +unionDiscriminator + // +optional + CommandType CommandType `json:"commandType,omitempty"` + + // CLI Command executed in an existing component container + // +optional + Exec *ExecCommand `json:"exec,omitempty"` + + // Command that consists in applying a given component definition, + // typically bound to a devworkspace event. + // + // For example, when an `apply` command is bound to a `preStart` event, + // and references a `container` component, it will start the container as a + // K8S initContainer in the devworkspace POD, unless the component has its + // `dedicatedPod` field set to `true`. + // + // When no `apply` command exist for a given component, + // it is assumed the component will be applied at devworkspace start + // by default. 
+ // +optional + Apply *ApplyCommand `json:"apply,omitempty"` + + // Composite command that allows executing several sub-commands + // either sequentially or concurrently + // +optional + Composite *CompositeCommand `json:"composite,omitempty"` + + // Custom command whose logic is implementation-dependant + // and should be provided by the user + // possibly through some dedicated plugin + // +optional + // +devfile:overrides:include:omit=true + Custom *CustomCommand `json:"custom,omitempty"` +} + +type ExecCommand struct { + LabeledCommand `json:",inline"` + + // The actual command-line string + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. + CommandLine string `json:"commandLine"` + + // Describes component to which given action relates + // + Component string `json:"component"` + + // Working directory where the command should be executed + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. + // +optional + WorkingDir string `json:"workingDir,omitempty"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Optional list of environment variables that have to be set + // before running the command + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // Whether the command is capable to reload itself when source code changes. + // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. 
+ // + // Default value is `false` + HotReloadCapable bool `json:"hotReloadCapable,omitempty"` +} + +type ApplyCommand struct { + LabeledCommand `json:",inline"` + + // Describes component that will be applied + // + Component string `json:"component"` +} + +type CompositeCommand struct { + LabeledCommand `json:",inline"` + + // The commands that comprise this composite command + Commands []string `json:"commands,omitempty" patchStrategy:"replace"` + + // Indicates if the sub-commands should be executed concurrently + // +optional + Parallel bool `json:"parallel,omitempty"` +} + +type CustomCommand struct { + LabeledCommand `json:",inline"` + + // Class of command that the associated implementation component + // should use to process this command with the appropriate logic + CommandClass string `json:"commandClass"` + + // Additional free-form configuration for this custom command + // that the implementation component will know how to use + // + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:EmbeddedResource + EmbeddedResource runtime.RawExtension `json:"embeddedResource"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go new file mode 100644 index 000000000..bec6b34e7 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go @@ -0,0 +1,93 @@ +package v1alpha2 + +// Component that allows the developer to add a configured container into their devworkspace +type ContainerComponent struct { + BaseComponent `json:",inline"` + Container `json:",inline"` + Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +type Container struct { + Image string `json:"image"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Environment variables used in this container. 
+ // + // The following variables are reserved and cannot be overridden via env: + // + // - `$PROJECTS_ROOT` + // + // - `$PROJECT_SOURCE` + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // List of volume mounts that should be mounted in this container. + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + MemoryLimit string `json:"memoryLimit,omitempty"` + + // +optional + MemoryRequest string `json:"memoryRequest,omitempty"` + + // +optional + CpuLimit string `json:"cpuLimit,omitempty"` + + // +optional + CpuRequest string `json:"cpuRequest,omitempty"` + + // The command to run in the dockerimage component instead of the default one provided in the image. + // + // Defaults to an empty array, meaning use whatever is defined in the image. + // +optional + Command []string `json:"command,omitempty" patchStrategy:"replace"` + + // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. + // + // Defaults to an empty array, meaning use whatever is defined in the image. + // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Toggles whether or not the project source code should + // be mounted in the component. + // + // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. + // +optional + MountSources *bool `json:"mountSources,omitempty"` + + // Optional specification of the path in the container where + // project sources should be transferred/mounted when `mountSources` is `true`. + // When omitted, the default value of /projects is used.
+ // +optional + // +kubebuilder:default=/projects + SourceMapping string `json:"sourceMapping,omitempty"` + + // Specify if a container should run in its own separated pod, + // instead of running as part of the main development environment pod. + // + // Default value is `false` + // +optional + DedicatedPod bool `json:"dedicatedPod,omitempty"` +} + +type EnvVar struct { + Name string `json:"name" yaml:"name"` + Value string `json:"value" yaml:"value"` +} + +// Volume that should be mounted to a component container +type VolumeMount struct { + // The volume mount name is the name of an existing `Volume` component. + // If several containers mount the same volume name + // then they will reuse the same volume and will be able to access the same files. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // The path in the component container where the volume should be mounted. + // If no path is mentioned, the default path is `/`. + // +optional + Path string `json:"path,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go new file mode 100644 index 000000000..483735c0f --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go @@ -0,0 +1,45 @@ +package v1alpha2 + +// K8sLikeComponentLocationType describes the type of +// the location the configuration is fetched from. +// Only one of the following component type may be specified.
+// +kubebuilder:validation:Enum=Uri;Inlined +type K8sLikeComponentLocationType string + +const ( + UriK8sLikeComponentLocationType K8sLikeComponentLocationType = "Uri" + InlinedK8sLikeComponentLocationType K8sLikeComponentLocationType = "Inlined" +) + +// +union +type K8sLikeComponentLocation struct { + // Type of Kubernetes-like location + // + + // +unionDiscriminator + // +optional + LocationType K8sLikeComponentLocationType `json:"locationType,omitempty"` + + // Location in a file fetched from a uri. + // +optional + Uri string `json:"uri,omitempty"` + + // Inlined manifest + // +optional + Inlined string `json:"inlined,omitempty"` +} + +type K8sLikeComponent struct { + BaseComponent `json:",inline"` + K8sLikeComponentLocation `json:",inline"` + Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Component that allows partly importing Kubernetes resources into the devworkspace POD +type KubernetesComponent struct { + K8sLikeComponent `json:",inline"` +} + +// Component that allows partly importing Openshift resources into the devworkspace POD +type OpenshiftComponent struct { + K8sLikeComponent `json:",inline"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go new file mode 100644 index 000000000..68933dd71 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go @@ -0,0 +1,7 @@ +package v1alpha2 + +type PluginComponent struct { + BaseComponent `json:",inline"` + ImportReference `json:",inline"` + PluginOverrides `json:",inline"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go new file mode 100644 index 000000000..54d3ecbbb --- /dev/null +++ 
b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go @@ -0,0 +1,19 @@ +package v1alpha2 + +// Component that allows the developer to declare and configure a volume into their devworkspace +type VolumeComponent struct { + BaseComponent `json:",inline"` + Volume `json:",inline"` +} + +// Volume that should be mounted to a component container +type Volume struct { + // +optional + // Size of the volume + Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. Defaults + // to false + Ephemeral bool `json:"ephemeral,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go new file mode 100644 index 000000000..208499234 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go @@ -0,0 +1,104 @@ +package v1alpha2 + +import ( + attributes "github.com/devfile/api/v2/pkg/attributes" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ComponentType describes the type of component. +// Only one of the following component type may be specified. +// +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Plugin;Custom +type ComponentType string + +const ( + ContainerComponentType ComponentType = "Container" + KubernetesComponentType ComponentType = "Kubernetes" + OpenshiftComponentType ComponentType = "Openshift" + PluginComponentType ComponentType = "Plugin" + VolumeComponentType ComponentType = "Volume" + CustomComponentType ComponentType = "Custom" +) + +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. 
+type BaseComponent struct { +} + +//+k8s:openapi-gen=true +type Component struct { + // Mandatory name that allows referencing the component + // from other elements (such as commands) or from an external + // devfile that may reference this component through a parent or a plugin. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + ComponentUnion `json:",inline"` +} + +// +union +type ComponentUnion struct { + // Type of component + // + // +unionDiscriminator + // +optional + ComponentType ComponentType `json:"componentType,omitempty"` + + // Allows adding and configuring devworkspace-related containers + // +optional + Container *ContainerComponent `json:"container,omitempty"` + + // Allows importing into the devworkspace the Kubernetes resources + // defined in a given manifest. For example this allows reusing the Kubernetes + // definitions used to deploy some runtime components in production. + // + // +optional + Kubernetes *KubernetesComponent `json:"kubernetes,omitempty"` + + // Allows importing into the devworkspace the OpenShift resources + // defined in a given manifest. For example this allows reusing the OpenShift + // definitions used to deploy some runtime components in production. + // + // +optional + Openshift *OpenshiftComponent `json:"openshift,omitempty"` + + // Allows specifying the definition of a volume + // shared by several other components + // +optional + Volume *VolumeComponent `json:"volume,omitempty"` + + // Allows importing a plugin. + // + // Plugins are mainly imported devfiles that contribute components, commands + // and events as a consistent single unit. 
They are defined in either YAML files + // following the devfile syntax, + // or as `DevWorkspaceTemplate` Kubernetes Custom Resources + // +optional + // +devfile:overrides:include:omitInPlugin=true + Plugin *PluginComponent `json:"plugin,omitempty"` + + // Custom component whose logic is implementation-dependant + // and should be provided by the user + // possibly through some dedicated controller + // +optional + // +devfile:overrides:include:omit=true + Custom *CustomComponent `json:"custom,omitempty"` +} + +type CustomComponent struct { + // Class of component that the associated implementation controller + // should use to process this command with the appropriate logic + ComponentClass string `json:"componentClass"` + + // Additional free-form configuration for this custom component + // that the implementation controller will know how to use + // + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:EmbeddedResource + EmbeddedResource runtime.RawExtension `json:"embeddedResource"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go new file mode 100644 index 000000000..050399a40 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go @@ -0,0 +1,14 @@ +package v1alpha2 + +import ( + "github.com/devfile/api/v2/pkg/devfile" +) + +// Devfile describes the structure of a cloud-native devworkspace and development environment. 
+// +k8s:deepcopy-gen=false +// +devfile:jsonschema:generate:omitCustomUnionMembers=true,omitPluginUnionMembers=true +type Devfile struct { + devfile.DevfileHeader `json:",inline"` + + DevWorkspaceTemplateSpec `json:",inline"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_conversion.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_conversion.go new file mode 100644 index 000000000..03731eb1e --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_conversion.go @@ -0,0 +1,4 @@ +package v1alpha2 + +// Hub marks this type as a conversion hub. +func (*DevWorkspace) Hub() {} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go new file mode 100644 index 000000000..c7fe603c5 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go @@ -0,0 +1,97 @@ +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DevWorkspaceSpec defines the desired state of DevWorkspace +type DevWorkspaceSpec struct { + Started bool `json:"started"` + RoutingClass string `json:"routingClass,omitempty"` + Template DevWorkspaceTemplateSpec `json:"template,omitempty"` +} + +// DevWorkspaceStatus defines the observed state of DevWorkspace +type DevWorkspaceStatus struct { + // Id of the DevWorkspace + DevWorkspaceId string `json:"devworkspaceId"` + // Main URL for this DevWorkspace + MainUrl string `json:"mainUrl,omitempty"` + Phase DevWorkspacePhase `json:"phase,omitempty"` + // Conditions represent the latest available observations of an object's state + Conditions []DevWorkspaceCondition `json:"conditions,omitempty"` + // Message is a short user-readable message giving additional information + // about an object's state + Message string 
`json:"message,omitempty"` +} + +type DevWorkspacePhase string + +// Valid devworkspace Statuses +const ( + DevWorkspaceStatusStarting DevWorkspacePhase = "Starting" + DevWorkspaceStatusRunning DevWorkspacePhase = "Running" + DevWorkspaceStatusStopped DevWorkspacePhase = "Stopped" + DevWorkspaceStatusStopping DevWorkspacePhase = "Stopping" + DevWorkspaceStatusFailed DevWorkspacePhase = "Failed" + DevWorkspaceStatusError DevWorkspacePhase = "Error" +) + +// DevWorkspaceCondition contains details for the current condition of this devworkspace. +type DevWorkspaceCondition struct { + // Type is the type of the condition. + Type DevWorkspaceConditionType `json:"type"` + // Phase is the status of the condition. + // Can be True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + // Human-readable message indicating details about last transition. 
+ Message string `json:"message,omitempty"` +} + +// Types of conditions reported by devworkspace +type DevWorkspaceConditionType string + +const ( + DevWorkspaceComponentsReady DevWorkspaceConditionType = "ComponentsReady" + DevWorkspaceRoutingReady DevWorkspaceConditionType = "RoutingReady" + DevWorkspaceServiceAccountReady DevWorkspaceConditionType = "ServiceAccountReady" + DevWorkspaceReady DevWorkspaceConditionType = "Ready" + DevWorkspaceFailedStart DevWorkspaceConditionType = "FailedStart" + DevWorkspaceError DevWorkspaceConditionType = "Error" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DevWorkspace is the Schema for the devworkspaces API +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=devworkspaces,scope=Namespaced,shortName=dw +// +kubebuilder:printcolumn:name="DevWorkspace ID",type="string",JSONPath=".status.devworkspaceId",description="The devworkspace's unique id" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The current devworkspace startup phase" +// +kubebuilder:printcolumn:name="Info",type="string",JSONPath=".status.message",description="Additional information about the devworkspace" +// +devfile:jsonschema:generate +// +kubebuilder:storageversion +type DevWorkspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DevWorkspaceSpec `json:"spec,omitempty"` + Status DevWorkspaceStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DevWorkspaceList contains a list of DevWorkspace +type DevWorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DevWorkspace `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DevWorkspace{}, &DevWorkspaceList{}) +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_conversion.go 
b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_conversion.go new file mode 100644 index 000000000..fe889a1b4 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_conversion.go @@ -0,0 +1,4 @@ +package v1alpha2 + +// Hub marks this type as a conversion hub. +func (*DevWorkspaceTemplate) Hub() {} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go new file mode 100644 index 000000000..4f58d3de8 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go @@ -0,0 +1,79 @@ +package v1alpha2 + +import attributes "github.com/devfile/api/v2/pkg/attributes" + +// Structure of the devworkspace. This is also the specification of a devworkspace template. +// +devfile:jsonschema:generate +type DevWorkspaceTemplateSpec struct { + // Parent devworkspace template + // +optional + Parent *Parent `json:"parent,omitempty"` + + DevWorkspaceTemplateSpecContent `json:",inline"` +} + +// +devfile:overrides:generate +type DevWorkspaceTemplateSpecContent struct { + // Map of key-value variables used for string replacement in the devfile. Values can can be referenced via {{variable-key}} + // to replace the corresponding value in string fields in the devfile. Replacement cannot be used for + // + // - schemaVersion, metadata, parent source + // + // - element identifiers, e.g. command id, component name, endpoint name, project name + // + // - references to identifiers, e.g. in events, a command's component, container's volume mount name + // + // - string enums, e.g. command group kind, endpoint exposure + // +optional + // +patchStrategy=merge + // +devfile:overrides:include:omitInPlugin=true,description=Overrides of variables encapsulated in a parent devfile. 
+ Variables map[string]string `json:"variables,omitempty" patchStrategy:"merge"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +patchStrategy=merge + // +devfile:overrides:include:omitInPlugin=true,description=Overrides of attributes encapsulated in a parent devfile. + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty" patchStrategy:"merge"` + + // List of the devworkspace components, such as editor and plugins, + // user-provided containers, or other types of components + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:overrides:include:description=Overrides of components encapsulated in a parent devfile or a plugin. + // +devfile:toplevellist + Components []Component `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // Projects worked on in the devworkspace, containing names and sources locations + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:overrides:include:omitInPlugin=true,description=Overrides of projects encapsulated in a parent devfile. + // +devfile:toplevellist + Projects []Project `json:"projects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // StarterProjects is a project that can be used as a starting point when bootstrapping new projects + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:overrides:include:omitInPlugin=true,description=Overrides of starterProjects encapsulated in a parent devfile. 
+ // +devfile:toplevellist + StarterProjects []StarterProject `json:"starterProjects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // Predefined, ready-to-use, devworkspace-related commands + // +optional + // +patchMergeKey=id + // +patchStrategy=merge + // +devfile:overrides:include:description=Overrides of commands encapsulated in a parent devfile or a plugin. + // +devfile:toplevellist + Commands []Command `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` + + // Bindings of commands to events. + // Each command is referred-to by its name. + // +optional + // +devfile:overrides:include:omit=true + Events *Events `json:"events,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_types.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_types.go new file mode 100644 index 000000000..e7c31813e --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_types.go @@ -0,0 +1,31 @@ +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DevWorkspaceTemplate is the Schema for the devworkspacetemplates API +// +kubebuilder:resource:path=devworkspacetemplates,scope=Namespaced,shortName=dwt +// +devfile:jsonschema:generate +// +kubebuilder:storageversion +type DevWorkspaceTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DevWorkspaceTemplateSpec `json:"spec,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DevWorkspaceTemplateList contains a list of DevWorkspaceTemplate +type DevWorkspaceTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DevWorkspaceTemplate `json:"items"` +} + +func init() { + 
SchemeBuilder.Register(&DevWorkspaceTemplate{}, &DevWorkspaceTemplateList{}) +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go new file mode 100644 index 000000000..c16dcf897 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go @@ -0,0 +1,6 @@ +// Package v1alpha2 contains API Schema definitions for the org v1alpha2 API group +// +k8s:deepcopy-gen=package,register +// +k8s:openapi-gen=true +// +groupName=workspace.devfile.io +// +devfile:jsonschema:version=2.2.0-alpha +package v1alpha2 diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go new file mode 100644 index 000000000..c12e1262a --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go @@ -0,0 +1,115 @@ +package v1alpha2 + +import ( + attributes "github.com/devfile/api/v2/pkg/attributes" +) + +// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. +// Only one of the following protocols may be specified: http, ws, tcp, udp. +// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp +type EndpointProtocol string + +const ( + // Endpoint will have `http` traffic, typically on a TCP connection. 
+ // It will be automaticaly promoted to `https` when the `secure` field is set to `true` + HTTPEndpointProtocol EndpointProtocol = "http" + // Endpoint will have `https` traffic, typically on a TCP connection + HTTPSEndpointProtocol EndpointProtocol = "https" + // Endpoint will have `ws` traffic, typically on a TCP connection + // It will be automaticaly promoted to `wss` when the `secure` field is set to `true` + WSEndpointProtocol EndpointProtocol = "ws" + // Endpoint will have `wss` traffic, typically on a TCP connection + WSSEndpointProtocol EndpointProtocol = "wss" + // Endpoint will have traffic on a TCP connection, + // without specifying an application protocol + TCPEndpointProtocol EndpointProtocol = "tcp" + // Endpoint will have traffic on an UDP connection, + // without specifying an application protocol + UDPEndpointProtocol EndpointProtocol = "udp" +) + +// EndpointExposure describes the way an endpoint is exposed on the network. +// Only one of the following exposures may be specified: public, internal, none. +// +kubebuilder:validation:Enum=public;internal;none +type EndpointExposure string + +const ( + // Endpoint will be exposed on the public network, typically through + // a K8S ingress or an OpenShift route + PublicEndpointExposure EndpointExposure = "public" + // Endpoint will be exposed internally outside of the main devworkspace POD, + // typically by K8S services, to be consumed by other elements running + // on the same cloud internal network. + InternalEndpointExposure EndpointExposure = "internal" + // Endpoint will not be exposed and will only be accessible + // inside the main devworkspace POD, on a local address. + NoneEndpointExposure EndpointExposure = "none" +) + +type Endpoint struct { + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + TargetPort int `json:"targetPort"` + + // Describes how the endpoint should be exposed on the network. 
+ // + // - `public` means that the endpoint will be exposed on the public network, typically through + // a K8S ingress or an OpenShift route. + // + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, + // typically by K8S services, to be consumed by other elements running + // on the same cloud internal network. + // + // - `none` means that the endpoint will not be exposed and will only be accessible + // inside the main devworkspace POD, on a local address. + // + // Default value is `public` + // +optional + // +kubebuilder:default=public + Exposure EndpointExposure `json:"exposure,omitempty"` + + // Describes the application and transport protocols of the traffic that will go through this endpoint. + // + // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. + // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. + // + // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. + // + // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. + // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. + // + // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. + // + // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. + // + // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. + // + // Default value is `http` + // +optional + // +kubebuilder:default=http + Protocol EndpointProtocol `json:"protocol,omitempty"` + + // Describes whether the endpoint should be secured and protected by some + // authentication process. This requires a protocol of `https` or `wss`. 
+ // +optional + Secure bool `json:"secure,omitempty"` + + // Path of the endpoint URL + // +optional + Path string `json:"path,omitempty"` + + // Map of implementation-dependant string-based free-form attributes. + // + // Examples of Che-specific attributes: + // + // - cookiesAuthEnabled: "true" / "false", + // + // - type: "terminal" / "ide" / "ide-dev", + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go new file mode 100644 index 000000000..2a8bd91a5 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go @@ -0,0 +1,26 @@ +package v1alpha2 + +type Events struct { + DevWorkspaceEvents `json:",inline"` +} + +type DevWorkspaceEvents struct { + // IDs of commands that should be executed before the devworkspace start. + // Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD. + // +optional + PreStart []string `json:"preStart,omitempty"` + + // IDs of commands that should be executed after the devworkspace is completely started. + // In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. + // This means that those commands are not triggered until the user opens the IDE in his browser. + // +optional + PostStart []string `json:"postStart,omitempty"` + + // +optional + // IDs of commands that should be executed before stopping the devworkspace. + PreStop []string `json:"preStop,omitempty"` + + // +optional + // IDs of commands that should be executed after stopping the devworkspace. 
+ PostStop []string `json:"postStop,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go new file mode 100644 index 000000000..d422cdca2 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go @@ -0,0 +1,53 @@ +package v1alpha2 + +// ImportReferenceType describes the type of location +// from where the referenced template structure should be retrieved. +// Only one of the following parent locations may be specified. +// +kubebuilder:validation:Enum=Uri;Id;Kubernetes +type ImportReferenceType string + +const ( + UriImportReferenceType ImportReferenceType = "Uri" + IdImportReferenceType ImportReferenceType = "Id" + KubernetesImportReferenceType ImportReferenceType = "Kubernetes" +) + +// Location from where the an import reference is retrieved +// +union +type ImportReferenceUnion struct { + // type of location from where the referenced template structure should be retrieved + // + + // +unionDiscriminator + // +optional + ImportReferenceType ImportReferenceType `json:"importReferenceType,omitempty"` + + // URI Reference of a parent devfile YAML file. + // It can be a full URL or a relative URI with the current devfile as the base URI. + // +optional + Uri string `json:"uri,omitempty"` + + // Id in a registry that contains a Devfile yaml file + // +optional + Id string `json:"id,omitempty"` + + // Reference to a Kubernetes CRD of type DevWorkspaceTemplate + // +optional + Kubernetes *KubernetesCustomResourceImportReference `json:"kubernetes,omitempty"` +} + +type KubernetesCustomResourceImportReference struct { + Name string `json:"name"` + + // +optional + Namespace string `json:"namespace,omitempty"` +} + +type ImportReference struct { + ImportReferenceUnion `json:",inline"` + + // Registry URL to pull the parent devfile from when using id in the parent reference. 
+ // To ensure the parent devfile gets resolved consistently in different environments, + // it is recommended to always specify the `registryUrl` when `Id` is used. + // +optional + RegistryUrl string `json:"registryUrl,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed.go new file mode 100644 index 000000000..81182efc7 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed.go @@ -0,0 +1,51 @@ +package v1alpha2 + +// Keyed is expected to be implemented by the elements of the devfile top-level lists +// (such as Command, Component, Project, ...). +// +// The Keys of list objects will typically be used to merge the top-level lists +// according to strategic merge patch rules, during parent or plugin overriding. +// +k8s:deepcopy-gen=false +type Keyed interface { + // Key is a string that allows uniquely identifying the object, + // especially in the Devfile top-level lists that are map-like K8S-compatible lists. + Key() string +} + +// KeyedList is a list of objects that are uniquely identified by a Key +// The devfile top-level list (such as Commands, Components, Projects, ...) +// are examples of such lists of Keyed objects +// +k8s:deepcopy-gen=false +type KeyedList []Keyed + +// GetKeys converts a KeyedList into a slice of string by calling Key() on each +// element in the list. +func (l KeyedList) GetKeys() []string { + var res []string + for _, keyed := range l { + res = append(res, keyed.Key()) + } + return res +} + +// TopLevelLists is a map that contains several Devfile top-level lists +// (such as `Commands`, `Components`, `Projects`, ...), available as `KeyedList`s. +// +// Each key of this map is the name of the field that contains the given top-level list: +// `Commands`, `Components`, etc...
+// +k8s:deepcopy-gen=false +type TopLevelLists map[string]KeyedList + +// TopLevelListContainer is an interface that allows retrieving the devfile top-level lists +// from an object. +// Main implementor of this interface will be the `DevWorkspaceTemplateSpecContent`, which +// will return all its devfile top-level lists. +// +// But this will also be implemented by `Overrides` which may return less top-level lists +// the `DevWorkspaceTemplateSpecContent`, according to the top-level lists they can override. +// `PluginOverride` will not return the `Projects` and `StarterProjects` list, since plugins are +// not expected to override `projects` or `starterProjects` +// +k8s:deepcopy-gen=false +type TopLevelListContainer interface { + GetToplevelLists() TopLevelLists +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed_implementations.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed_implementations.go new file mode 100644 index 000000000..5121f4ff1 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed_implementations.go @@ -0,0 +1,41 @@ +package v1alpha2 + +import ( + "fmt" + "reflect" +) + +func extractKeys(keyedList interface{}) []Keyed { + value := reflect.ValueOf(keyedList) + keys := make([]Keyed, 0, value.Len()) + for i := 0; i < value.Len(); i++ { + elem := value.Index(i) + if elem.CanInterface() { + i := elem.Interface() + if keyed, ok := i.(Keyed); ok { + keys = append(keys, keyed) + } + } + } + return keys +} + +// CheckDuplicateKeys checks if duplicate keys are present in the devfile objects +func CheckDuplicateKeys(keyedList interface{}) error { + seen := map[string]bool{} + value := reflect.ValueOf(keyedList) + for i := 0; i < value.Len(); i++ { + elem := value.Index(i) + if elem.CanInterface() { + i := elem.Interface() + if keyed, ok := i.(Keyed); ok { + key := keyed.Key() + if seen[key] { + return fmt.Errorf("duplicate key: %s", key) + } + seen[key] = true + } + } 
+ }
+ return nil
+}
diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go
new file mode 100644
index 000000000..280ea367c
--- /dev/null
+++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go
@@ -0,0 +1,58 @@
+package v1alpha2
+
+// +kubebuilder:validation:Enum=replace;delete
+type OverridingPatchDirective string
+
+const (
+ ReplaceOverridingDirective OverridingPatchDirective = "replace"
+ DeleteOverridingDirective OverridingPatchDirective = "delete"
+)
+
+const (
+ DeleteFromPrimitiveListOverridingPatchDirective OverridingPatchDirective = "replace"
+)
+
+type OverrideDirective struct {
+ // Path of the element the directive should be applied on
+ //
+ // For the following path tree:
+ //
+ // ```json
+ // commands:
+ // - exec
+ // id: commandId
+ // ```
+ //
+ // the path would be: `commands["commandId"]`.
+ Path string `json:"path"`
+
+ // `$Patch` directive as defined in
+ // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#basic-patch-format
+ //
+ // This is an enumeration that allows the following values:
+ //
+ // - *replace*: indicates that the element matched by the `jsonPath` field should be replaced instead of being merged.
+ //
+ // - *delete*: indicates that the element matched by the `jsonPath` field should be deleted.
+ //
+ // +optional
+ Patch OverridingPatchDirective `json:"patch,omitempty"`
+
+ // `DeleteFromPrimitiveList` directive as defined in
+ // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#deletefromprimitivelist-directive
+ //
+ // This indicates that the elements in this list should be deleted from the original primitive list.
+ // The original primitive list is the element matched by the `jsonPath` field.
+ // +optional
+ DeleteFromPrimitiveList []string `json:"deleteFromPrimitiveList,omitempty"`
+
+ // `SetElementOrder` directive as defined in
+ // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#setelementorder-directive
+ //
+ // This provides a way to specify the order of a list. The relative order specified in this directive will be retained.
+ // The list whose order is controlled is the element matched by the `jsonPath` field.
+ // If the controlled list is a list of objects, then the values in this list should be
+ // the merge keys of the objects to order.
+ // +optional
+ SetElementOrder []string `json:"setElementOrder,omitempty"`
+}
diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrides.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrides.go
new file mode 100644
index 000000000..69ad5b6e3
--- /dev/null
+++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrides.go
@@ -0,0 +1,11 @@
+package v1alpha2
+
+// +k8s:deepcopy-gen=false
+type Overrides interface {
+ TopLevelListContainer
+ isOverride()
+}
+
+// OverridesBase is used in the Overrides generator in order to provide a common base for the generated Overrides
+// So please be careful when renaming
+type OverridesBase struct{}
diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/parent.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/parent.go
new file mode 100644
index 000000000..69ebaa6b2
--- /dev/null
+++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/parent.go
@@ -0,0 +1,6 @@
+package v1alpha2
+
+type Parent struct {
+ ImportReference `json:",inline"`
+ ParentOverrides `json:",inline"`
+}
diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go
new file mode 100644
index 000000000..a6c526946
---
/dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go @@ -0,0 +1,128 @@ +package v1alpha2 + +import ( + attributes "github.com/devfile/api/v2/pkg/attributes" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +type Project struct { + // Project name + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + + // Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name. + // +optional + ClonePath string `json:"clonePath,omitempty"` + + ProjectSource `json:",inline"` +} + +type StarterProject struct { + // Project name + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + + // Description of a starter project + // +optional + Description string `json:"description,omitempty"` + + // Sub-directory from a starter project to be used as root for starter project. + // +optional + SubDir string `json:"subDir,omitempty"` + + ProjectSource `json:",inline"` +} + +// ProjectSourceType describes the type of Project sources. +// Only one of the following project sources may be specified. 
+// If none of the following policies is specified, the default one +// is AllowConcurrent. +// +kubebuilder:validation:Enum=Git;Zip;Custom +type ProjectSourceType string + +const ( + GitProjectSourceType ProjectSourceType = "Git" + ZipProjectSourceType ProjectSourceType = "Zip" + CustomProjectSourceType ProjectSourceType = "Custom" +) + +// +union +type ProjectSource struct { + // Type of project source + // + + // +unionDiscriminator + // +optional + SourceType ProjectSourceType `json:"sourceType,omitempty"` + + // Project's Git source + // +optional + Git *GitProjectSource `json:"git,omitempty"` + + // Project's Zip source + // +optional + Zip *ZipProjectSource `json:"zip,omitempty"` + + // Project's Custom source + // +optional + // +devfile:overrides:include:omit=true + Custom *CustomProjectSource `json:"custom,omitempty"` +} + +type CommonProjectSource struct { +} + +type CustomProjectSource struct { + ProjectSourceClass string `json:"projectSourceClass"` + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:EmbeddedResource + EmbeddedResource runtime.RawExtension `json:"embeddedResource"` +} + +type ZipProjectSource struct { + CommonProjectSource `json:",inline"` + + // Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH + // +required + Location string `json:"location,omitempty"` +} + +type GitLikeProjectSource struct { + CommonProjectSource `json:",inline"` + + // Defines from what the project should be checked out. Required if there are more than one remote configured + // +optional + CheckoutFrom *CheckoutFrom `json:"checkoutFrom,omitempty"` + + // The remotes map which should be initialized in the git project. Must have at least one remote configured + Remotes map[string]string `json:"remotes"` +} + +type CheckoutFrom struct { + // The revision to checkout from. Should be branch name, tag or commit id. + // Default branch is used if missing or specified revision is not found. 
+ // +optional + Revision string `json:"revision,omitempty"` + // The remote name should be used as init. Required if there are more than one remote configured + // +optional + Remote string `json:"remote,omitempty"` +} + +type GitProjectSource struct { + GitLikeProjectSource `json:",inline"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/register.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/register.go new file mode 100644 index 000000000..e83c8de58 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/register.go @@ -0,0 +1,22 @@ +// NOTE: Boilerplate only. Ignore this file. + +// Package v1alpha2 contains API Schema definitions for the org v1alpha2 API group +// +k8s:deepcopy-gen=package,register +// +groupName=workspace.devfile.io +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "workspace.devfile.io", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union.go
new file mode 100644
index 000000000..34fb138c9
--- /dev/null
+++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union.go
@@ -0,0 +1,21 @@
+package v1alpha2
+
+// +k8s:deepcopy-gen=false
+
+// Union is an interface that allows managing structs defined as
+// Kubernetes unions with discriminators, according to the following KEP:
+// https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190325-unions.md
+type Union interface {
+ discriminator() *string
+
+ // Normalize allows normalizing the union, according to the following rules:
+ // - When only one field of the union is set and no discriminator is set, set the discriminator according to the union value.
+ // - When several fields are set and a discriminator is set, remove (== reset to zero value) all the values that do not match the discriminator.
+ // - When only one union value is set and it matches discriminator, just do nothing.
+ // - In other case, something is inconsistent or ambiguous: an error is thrown.
+ Normalize() error
+
+ // Simplify allows removing the union discriminator,
+ // but only after normalizing it if necessary.
+ Simplify() +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union_implementation.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union_implementation.go new file mode 100644 index 000000000..d82ba4f26 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union_implementation.go @@ -0,0 +1,103 @@ +package v1alpha2 + +import ( + "errors" + "reflect" +) + +func visitUnion(union interface{}, visitor interface{}) (err error) { + visitorValue := reflect.ValueOf(visitor) + unionValue := reflect.ValueOf(union) + oneMemberPresent := false + typeOfVisitor := visitorValue.Type() + for i := 0; i < visitorValue.NumField(); i++ { + unionMemberToRead := typeOfVisitor.Field(i).Name + unionMember := unionValue.FieldByName(unionMemberToRead) + if !unionMember.IsZero() { + if oneMemberPresent { + err = errors.New("Only one element should be set in union: " + unionValue.Type().Name()) + return + } + oneMemberPresent = true + visitorFunction := visitorValue.Field(i) + if visitorFunction.IsNil() { + return + } + results := visitorFunction.Call([]reflect.Value{unionMember}) + if !results[0].IsNil() { + err = results[0].Interface().(error) + } + return + } + } + return +} + +func simplifyUnion(union Union, visitorType reflect.Type) { + normalizeUnion(union, visitorType) + *union.discriminator() = "" +} + +func normalizeUnion(union Union, visitorType reflect.Type) error { + err := updateDiscriminator(union, visitorType) + if err != nil { + return err + } + + err = cleanupValues(union, visitorType) + if err != nil { + return err + } + return nil +} + +func updateDiscriminator(union Union, visitorType reflect.Type) error { + unionValue := reflect.ValueOf(union) + + if union.discriminator() == nil { + return errors.New("Discriminator should not be 'nil' in union: " + unionValue.Type().Name()) + } + + if *union.discriminator() != "" { + // Nothing to do + return nil + } + + oneMemberPresent := false + for i := 0; i < 
visitorType.NumField(); i++ { + unionMemberToRead := visitorType.Field(i).Name + unionMember := unionValue.Elem().FieldByName(unionMemberToRead) + if !unionMember.IsZero() { + if oneMemberPresent { + return errors.New("Discriminator cannot be deduced from 2 values in union: " + unionValue.Type().Name()) + } + oneMemberPresent = true + *(union.discriminator()) = unionMemberToRead + } + } + return nil +} + +func cleanupValues(union Union, visitorType reflect.Type) error { + unionValue := reflect.ValueOf(union) + + if union.discriminator() == nil { + return errors.New("Discriminator should not be 'nil' in union: " + unionValue.Type().Name()) + } + + if *union.discriminator() == "" { + // Nothing to do + return errors.New("Values cannot be cleaned up without a discriminator in union: " + unionValue.Type().Name()) + } + + for i := 0; i < visitorType.NumField(); i++ { + unionMemberToRead := visitorType.Field(i).Name + unionMember := unionValue.Elem().FieldByName(unionMemberToRead) + if !unionMember.IsZero() { + if unionMemberToRead != *union.discriminator() { + unionMember.Set(reflect.Zero(unionMember.Type())) + } + } + } + return nil +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..80d4e331c --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,2855 @@ +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "github.com/devfile/api/v2/pkg/attributes" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplyCommand) DeepCopyInto(out *ApplyCommand) { + *out = *in + in.LabeledCommand.DeepCopyInto(&out.LabeledCommand) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommand. +func (in *ApplyCommand) DeepCopy() *ApplyCommand { + if in == nil { + return nil + } + out := new(ApplyCommand) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyCommandParentOverride) DeepCopyInto(out *ApplyCommandParentOverride) { + *out = *in + in.LabeledCommandParentOverride.DeepCopyInto(&out.LabeledCommandParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommandParentOverride. +func (in *ApplyCommandParentOverride) DeepCopy() *ApplyCommandParentOverride { + if in == nil { + return nil + } + out := new(ApplyCommandParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyCommandPluginOverride) DeepCopyInto(out *ApplyCommandPluginOverride) { + *out = *in + in.LabeledCommandPluginOverride.DeepCopyInto(&out.LabeledCommandPluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommandPluginOverride. +func (in *ApplyCommandPluginOverride) DeepCopy() *ApplyCommandPluginOverride { + if in == nil { + return nil + } + out := new(ApplyCommandPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplyCommandPluginOverrideParentOverride) DeepCopyInto(out *ApplyCommandPluginOverrideParentOverride) { + *out = *in + in.LabeledCommandPluginOverrideParentOverride.DeepCopyInto(&out.LabeledCommandPluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommandPluginOverrideParentOverride. +func (in *ApplyCommandPluginOverrideParentOverride) DeepCopy() *ApplyCommandPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(ApplyCommandPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseCommand) DeepCopyInto(out *BaseCommand) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(CommandGroup) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommand. +func (in *BaseCommand) DeepCopy() *BaseCommand { + if in == nil { + return nil + } + out := new(BaseCommand) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseCommandParentOverride) DeepCopyInto(out *BaseCommandParentOverride) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(CommandGroupParentOverride) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommandParentOverride. +func (in *BaseCommandParentOverride) DeepCopy() *BaseCommandParentOverride { + if in == nil { + return nil + } + out := new(BaseCommandParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BaseCommandPluginOverride) DeepCopyInto(out *BaseCommandPluginOverride) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(CommandGroupPluginOverride) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommandPluginOverride. +func (in *BaseCommandPluginOverride) DeepCopy() *BaseCommandPluginOverride { + if in == nil { + return nil + } + out := new(BaseCommandPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseCommandPluginOverrideParentOverride) DeepCopyInto(out *BaseCommandPluginOverrideParentOverride) { + *out = *in + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(CommandGroupPluginOverrideParentOverride) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommandPluginOverrideParentOverride. +func (in *BaseCommandPluginOverrideParentOverride) DeepCopy() *BaseCommandPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(BaseCommandPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseComponent) DeepCopyInto(out *BaseComponent) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponent. +func (in *BaseComponent) DeepCopy() *BaseComponent { + if in == nil { + return nil + } + out := new(BaseComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BaseComponentParentOverride) DeepCopyInto(out *BaseComponentParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponentParentOverride. +func (in *BaseComponentParentOverride) DeepCopy() *BaseComponentParentOverride { + if in == nil { + return nil + } + out := new(BaseComponentParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseComponentPluginOverride) DeepCopyInto(out *BaseComponentPluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponentPluginOverride. +func (in *BaseComponentPluginOverride) DeepCopy() *BaseComponentPluginOverride { + if in == nil { + return nil + } + out := new(BaseComponentPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseComponentPluginOverrideParentOverride) DeepCopyInto(out *BaseComponentPluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponentPluginOverrideParentOverride. +func (in *BaseComponentPluginOverrideParentOverride) DeepCopy() *BaseComponentPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(BaseComponentPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckoutFrom) DeepCopyInto(out *CheckoutFrom) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFrom. 
+func (in *CheckoutFrom) DeepCopy() *CheckoutFrom { + if in == nil { + return nil + } + out := new(CheckoutFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckoutFromParentOverride) DeepCopyInto(out *CheckoutFromParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFromParentOverride. +func (in *CheckoutFromParentOverride) DeepCopy() *CheckoutFromParentOverride { + if in == nil { + return nil + } + out := new(CheckoutFromParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Command) DeepCopyInto(out *Command) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.CommandUnion.DeepCopyInto(&out.CommandUnion) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Command. +func (in *Command) DeepCopy() *Command { + if in == nil { + return nil + } + out := new(Command) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandGroup) DeepCopyInto(out *CommandGroup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroup. +func (in *CommandGroup) DeepCopy() *CommandGroup { + if in == nil { + return nil + } + out := new(CommandGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommandGroupParentOverride) DeepCopyInto(out *CommandGroupParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupParentOverride. +func (in *CommandGroupParentOverride) DeepCopy() *CommandGroupParentOverride { + if in == nil { + return nil + } + out := new(CommandGroupParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandGroupPluginOverride) DeepCopyInto(out *CommandGroupPluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupPluginOverride. +func (in *CommandGroupPluginOverride) DeepCopy() *CommandGroupPluginOverride { + if in == nil { + return nil + } + out := new(CommandGroupPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandGroupPluginOverrideParentOverride) DeepCopyInto(out *CommandGroupPluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupPluginOverrideParentOverride. +func (in *CommandGroupPluginOverrideParentOverride) DeepCopy() *CommandGroupPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(CommandGroupPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommandParentOverride) DeepCopyInto(out *CommandParentOverride) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.CommandUnionParentOverride.DeepCopyInto(&out.CommandUnionParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandParentOverride. +func (in *CommandParentOverride) DeepCopy() *CommandParentOverride { + if in == nil { + return nil + } + out := new(CommandParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandPluginOverride) DeepCopyInto(out *CommandPluginOverride) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.CommandUnionPluginOverride.DeepCopyInto(&out.CommandUnionPluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandPluginOverride. +func (in *CommandPluginOverride) DeepCopy() *CommandPluginOverride { + if in == nil { + return nil + } + out := new(CommandPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommandPluginOverrideParentOverride) DeepCopyInto(out *CommandPluginOverrideParentOverride) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.CommandUnionPluginOverrideParentOverride.DeepCopyInto(&out.CommandUnionPluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandPluginOverrideParentOverride. +func (in *CommandPluginOverrideParentOverride) DeepCopy() *CommandPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(CommandPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandUnion) DeepCopyInto(out *CommandUnion) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecCommand) + (*in).DeepCopyInto(*out) + } + if in.Apply != nil { + in, out := &in.Apply, &out.Apply + *out = new(ApplyCommand) + (*in).DeepCopyInto(*out) + } + if in.Composite != nil { + in, out := &in.Composite, &out.Composite + *out = new(CompositeCommand) + (*in).DeepCopyInto(*out) + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomCommand) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnion. +func (in *CommandUnion) DeepCopy() *CommandUnion { + if in == nil { + return nil + } + out := new(CommandUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommandUnionParentOverride) DeepCopyInto(out *CommandUnionParentOverride) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecCommandParentOverride) + (*in).DeepCopyInto(*out) + } + if in.Apply != nil { + in, out := &in.Apply, &out.Apply + *out = new(ApplyCommandParentOverride) + (*in).DeepCopyInto(*out) + } + if in.Composite != nil { + in, out := &in.Composite, &out.Composite + *out = new(CompositeCommandParentOverride) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnionParentOverride. +func (in *CommandUnionParentOverride) DeepCopy() *CommandUnionParentOverride { + if in == nil { + return nil + } + out := new(CommandUnionParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandUnionPluginOverride) DeepCopyInto(out *CommandUnionPluginOverride) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecCommandPluginOverride) + (*in).DeepCopyInto(*out) + } + if in.Apply != nil { + in, out := &in.Apply, &out.Apply + *out = new(ApplyCommandPluginOverride) + (*in).DeepCopyInto(*out) + } + if in.Composite != nil { + in, out := &in.Composite, &out.Composite + *out = new(CompositeCommandPluginOverride) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnionPluginOverride. +func (in *CommandUnionPluginOverride) DeepCopy() *CommandUnionPluginOverride { + if in == nil { + return nil + } + out := new(CommandUnionPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *CommandUnionPluginOverrideParentOverride) DeepCopyInto(out *CommandUnionPluginOverrideParentOverride) {
	*out = *in
	if in.Exec != nil {
		in, out := &in.Exec, &out.Exec
		*out = new(ExecCommandPluginOverrideParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Apply != nil {
		in, out := &in.Apply, &out.Apply
		*out = new(ApplyCommandPluginOverrideParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Composite != nil {
		in, out := &in.Composite, &out.Composite
		*out = new(CompositeCommandPluginOverrideParentOverride)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnionPluginOverrideParentOverride.
func (in *CommandUnionPluginOverrideParentOverride) DeepCopy() *CommandUnionPluginOverrideParentOverride {
	if in == nil {
		return nil
	}
	out := new(CommandUnionPluginOverrideParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonProjectSource) DeepCopyInto(out *CommonProjectSource) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSource.
func (in *CommonProjectSource) DeepCopy() *CommonProjectSource {
	if in == nil {
		return nil
	}
	out := new(CommonProjectSource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonProjectSourceParentOverride) DeepCopyInto(out *CommonProjectSourceParentOverride) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSourceParentOverride.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *CommonProjectSourceParentOverride) DeepCopy() *CommonProjectSourceParentOverride {
	if in == nil {
		return nil
	}
	out := new(CommonProjectSourceParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Component) DeepCopyInto(out *Component) {
	*out = *in
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	in.ComponentUnion.DeepCopyInto(&out.ComponentUnion)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Component.
func (in *Component) DeepCopy() *Component {
	if in == nil {
		return nil
	}
	out := new(Component)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentParentOverride) DeepCopyInto(out *ComponentParentOverride) {
	*out = *in
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	in.ComponentUnionParentOverride.DeepCopyInto(&out.ComponentUnionParentOverride)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentParentOverride.
func (in *ComponentParentOverride) DeepCopy() *ComponentParentOverride {
	if in == nil {
		return nil
	}
	out := new(ComponentParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ComponentPluginOverride) DeepCopyInto(out *ComponentPluginOverride) {
	*out = *in
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	in.ComponentUnionPluginOverride.DeepCopyInto(&out.ComponentUnionPluginOverride)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentPluginOverride.
func (in *ComponentPluginOverride) DeepCopy() *ComponentPluginOverride {
	if in == nil {
		return nil
	}
	out := new(ComponentPluginOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentPluginOverrideParentOverride) DeepCopyInto(out *ComponentPluginOverrideParentOverride) {
	*out = *in
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	in.ComponentUnionPluginOverrideParentOverride.DeepCopyInto(&out.ComponentUnionPluginOverrideParentOverride)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentPluginOverrideParentOverride.
func (in *ComponentPluginOverrideParentOverride) DeepCopy() *ComponentPluginOverrideParentOverride {
	if in == nil {
		return nil
	}
	out := new(ComponentPluginOverrideParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
// Volume is copied with a plain assignment (no pointer fields in this API version, per the generator's output).
func (in *ComponentUnion) DeepCopyInto(out *ComponentUnion) {
	*out = *in
	if in.Container != nil {
		in, out := &in.Container, &out.Container
		*out = new(ContainerComponent)
		(*in).DeepCopyInto(*out)
	}
	if in.Kubernetes != nil {
		in, out := &in.Kubernetes, &out.Kubernetes
		*out = new(KubernetesComponent)
		(*in).DeepCopyInto(*out)
	}
	if in.Openshift != nil {
		in, out := &in.Openshift, &out.Openshift
		*out = new(OpenshiftComponent)
		(*in).DeepCopyInto(*out)
	}
	if in.Volume != nil {
		in, out := &in.Volume, &out.Volume
		*out = new(VolumeComponent)
		**out = **in
	}
	if in.Plugin != nil {
		in, out := &in.Plugin, &out.Plugin
		*out = new(PluginComponent)
		(*in).DeepCopyInto(*out)
	}
	if in.Custom != nil {
		in, out := &in.Custom, &out.Custom
		*out = new(CustomComponent)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnion.
func (in *ComponentUnion) DeepCopy() *ComponentUnion {
	if in == nil {
		return nil
	}
	out := new(ComponentUnion)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ComponentUnionParentOverride) DeepCopyInto(out *ComponentUnionParentOverride) {
	*out = *in
	if in.Container != nil {
		in, out := &in.Container, &out.Container
		*out = new(ContainerComponentParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Kubernetes != nil {
		in, out := &in.Kubernetes, &out.Kubernetes
		*out = new(KubernetesComponentParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Openshift != nil {
		in, out := &in.Openshift, &out.Openshift
		*out = new(OpenshiftComponentParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Volume != nil {
		in, out := &in.Volume, &out.Volume
		*out = new(VolumeComponentParentOverride)
		**out = **in
	}
	if in.Plugin != nil {
		in, out := &in.Plugin, &out.Plugin
		*out = new(PluginComponentParentOverride)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnionParentOverride.
func (in *ComponentUnionParentOverride) DeepCopy() *ComponentUnionParentOverride {
	if in == nil {
		return nil
	}
	out := new(ComponentUnionParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ComponentUnionPluginOverride) DeepCopyInto(out *ComponentUnionPluginOverride) {
	*out = *in
	if in.Container != nil {
		in, out := &in.Container, &out.Container
		*out = new(ContainerComponentPluginOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Kubernetes != nil {
		in, out := &in.Kubernetes, &out.Kubernetes
		*out = new(KubernetesComponentPluginOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Openshift != nil {
		in, out := &in.Openshift, &out.Openshift
		*out = new(OpenshiftComponentPluginOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Volume != nil {
		in, out := &in.Volume, &out.Volume
		*out = new(VolumeComponentPluginOverride)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnionPluginOverride.
func (in *ComponentUnionPluginOverride) DeepCopy() *ComponentUnionPluginOverride {
	if in == nil {
		return nil
	}
	out := new(ComponentUnionPluginOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ComponentUnionPluginOverrideParentOverride) DeepCopyInto(out *ComponentUnionPluginOverrideParentOverride) {
	*out = *in
	if in.Container != nil {
		in, out := &in.Container, &out.Container
		*out = new(ContainerComponentPluginOverrideParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Kubernetes != nil {
		in, out := &in.Kubernetes, &out.Kubernetes
		*out = new(KubernetesComponentPluginOverrideParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Openshift != nil {
		in, out := &in.Openshift, &out.Openshift
		*out = new(OpenshiftComponentPluginOverrideParentOverride)
		(*in).DeepCopyInto(*out)
	}
	if in.Volume != nil {
		in, out := &in.Volume, &out.Volume
		*out = new(VolumeComponentPluginOverrideParentOverride)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnionPluginOverrideParentOverride.
func (in *ComponentUnionPluginOverrideParentOverride) DeepCopy() *ComponentUnionPluginOverrideParentOverride {
	if in == nil {
		return nil
	}
	out := new(ComponentUnionPluginOverrideParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CompositeCommand) DeepCopyInto(out *CompositeCommand) {
	*out = *in
	in.LabeledCommand.DeepCopyInto(&out.LabeledCommand)
	if in.Commands != nil {
		in, out := &in.Commands, &out.Commands
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommand.
func (in *CompositeCommand) DeepCopy() *CompositeCommand {
	if in == nil {
		return nil
	}
	out := new(CompositeCommand)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *CompositeCommandParentOverride) DeepCopyInto(out *CompositeCommandParentOverride) {
	*out = *in
	in.LabeledCommandParentOverride.DeepCopyInto(&out.LabeledCommandParentOverride)
	if in.Commands != nil {
		in, out := &in.Commands, &out.Commands
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandParentOverride.
func (in *CompositeCommandParentOverride) DeepCopy() *CompositeCommandParentOverride {
	if in == nil {
		return nil
	}
	out := new(CompositeCommandParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CompositeCommandPluginOverride) DeepCopyInto(out *CompositeCommandPluginOverride) {
	*out = *in
	in.LabeledCommandPluginOverride.DeepCopyInto(&out.LabeledCommandPluginOverride)
	if in.Commands != nil {
		in, out := &in.Commands, &out.Commands
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandPluginOverride.
func (in *CompositeCommandPluginOverride) DeepCopy() *CompositeCommandPluginOverride {
	if in == nil {
		return nil
	}
	out := new(CompositeCommandPluginOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *CompositeCommandPluginOverrideParentOverride) DeepCopyInto(out *CompositeCommandPluginOverrideParentOverride) {
	*out = *in
	in.LabeledCommandPluginOverrideParentOverride.DeepCopyInto(&out.LabeledCommandPluginOverrideParentOverride)
	if in.Commands != nil {
		in, out := &in.Commands, &out.Commands
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandPluginOverrideParentOverride.
func (in *CompositeCommandPluginOverrideParentOverride) DeepCopy() *CompositeCommandPluginOverrideParentOverride {
	if in == nil {
		return nil
	}
	out := new(CompositeCommandPluginOverrideParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Container) DeepCopyInto(out *Container) {
	*out = *in
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]EnvVar, len(*in))
		copy(*out, *in)
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]VolumeMount, len(*in))
		copy(*out, *in)
	}
	if in.Command != nil {
		in, out := &in.Command, &out.Command
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.MountSources != nil {
		in, out := &in.MountSources, &out.MountSources
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
func (in *Container) DeepCopy() *Container {
	if in == nil {
		return nil
	}
	out := new(Container)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ContainerComponent) DeepCopyInto(out *ContainerComponent) {
	*out = *in
	out.BaseComponent = in.BaseComponent
	in.Container.DeepCopyInto(&out.Container)
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make([]Endpoint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponent.
func (in *ContainerComponent) DeepCopy() *ContainerComponent {
	if in == nil {
		return nil
	}
	out := new(ContainerComponent)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerComponentParentOverride) DeepCopyInto(out *ContainerComponentParentOverride) {
	*out = *in
	out.BaseComponentParentOverride = in.BaseComponentParentOverride
	in.ContainerParentOverride.DeepCopyInto(&out.ContainerParentOverride)
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make([]EndpointParentOverride, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponentParentOverride.
func (in *ContainerComponentParentOverride) DeepCopy() *ContainerComponentParentOverride {
	if in == nil {
		return nil
	}
	out := new(ContainerComponentParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ContainerComponentPluginOverride) DeepCopyInto(out *ContainerComponentPluginOverride) {
	*out = *in
	out.BaseComponentPluginOverride = in.BaseComponentPluginOverride
	in.ContainerPluginOverride.DeepCopyInto(&out.ContainerPluginOverride)
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make([]EndpointPluginOverride, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponentPluginOverride.
func (in *ContainerComponentPluginOverride) DeepCopy() *ContainerComponentPluginOverride {
	if in == nil {
		return nil
	}
	out := new(ContainerComponentPluginOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerComponentPluginOverrideParentOverride) DeepCopyInto(out *ContainerComponentPluginOverrideParentOverride) {
	*out = *in
	out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride
	in.ContainerPluginOverrideParentOverride.DeepCopyInto(&out.ContainerPluginOverrideParentOverride)
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make([]EndpointPluginOverrideParentOverride, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponentPluginOverrideParentOverride.
func (in *ContainerComponentPluginOverrideParentOverride) DeepCopy() *ContainerComponentPluginOverrideParentOverride {
	if in == nil {
		return nil
	}
	out := new(ContainerComponentPluginOverrideParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ContainerParentOverride) DeepCopyInto(out *ContainerParentOverride) {
	*out = *in
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]EnvVarParentOverride, len(*in))
		copy(*out, *in)
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]VolumeMountParentOverride, len(*in))
		copy(*out, *in)
	}
	if in.Command != nil {
		in, out := &in.Command, &out.Command
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.MountSources != nil {
		in, out := &in.MountSources, &out.MountSources
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParentOverride.
func (in *ContainerParentOverride) DeepCopy() *ContainerParentOverride {
	if in == nil {
		return nil
	}
	out := new(ContainerParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerPluginOverride) DeepCopyInto(out *ContainerPluginOverride) {
	*out = *in
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]EnvVarPluginOverride, len(*in))
		copy(*out, *in)
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]VolumeMountPluginOverride, len(*in))
		copy(*out, *in)
	}
	if in.Command != nil {
		in, out := &in.Command, &out.Command
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.MountSources != nil {
		in, out := &in.MountSources, &out.MountSources
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPluginOverride.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *ContainerPluginOverride) DeepCopy() *ContainerPluginOverride {
	if in == nil {
		return nil
	}
	out := new(ContainerPluginOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerPluginOverrideParentOverride) DeepCopyInto(out *ContainerPluginOverrideParentOverride) {
	*out = *in
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]EnvVarPluginOverrideParentOverride, len(*in))
		copy(*out, *in)
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]VolumeMountPluginOverrideParentOverride, len(*in))
		copy(*out, *in)
	}
	if in.Command != nil {
		in, out := &in.Command, &out.Command
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Args != nil {
		in, out := &in.Args, &out.Args
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.MountSources != nil {
		in, out := &in.MountSources, &out.MountSources
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPluginOverrideParentOverride.
func (in *ContainerPluginOverrideParentOverride) DeepCopy() *ContainerPluginOverrideParentOverride {
	if in == nil {
		return nil
	}
	out := new(ContainerPluginOverrideParentOverride)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomCommand) DeepCopyInto(out *CustomCommand) {
	*out = *in
	in.LabeledCommand.DeepCopyInto(&out.LabeledCommand)
	in.EmbeddedResource.DeepCopyInto(&out.EmbeddedResource)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCommand.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *CustomCommand) DeepCopy() *CustomCommand {
	if in == nil {
		return nil
	}
	out := new(CustomCommand)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomComponent) DeepCopyInto(out *CustomComponent) {
	*out = *in
	in.EmbeddedResource.DeepCopyInto(&out.EmbeddedResource)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomComponent.
func (in *CustomComponent) DeepCopy() *CustomComponent {
	if in == nil {
		return nil
	}
	out := new(CustomComponent)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomProjectSource) DeepCopyInto(out *CustomProjectSource) {
	*out = *in
	in.EmbeddedResource.DeepCopyInto(&out.EmbeddedResource)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomProjectSource.
func (in *CustomProjectSource) DeepCopy() *CustomProjectSource {
	if in == nil {
		return nil
	}
	out := new(CustomProjectSource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspace) DeepCopyInto(out *DevWorkspace) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspace.
func (in *DevWorkspace) DeepCopy() *DevWorkspace {
	if in == nil {
		return nil
	}
	out := new(DevWorkspace)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *DevWorkspace) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceCondition) DeepCopyInto(out *DevWorkspaceCondition) {
	*out = *in
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceCondition.
func (in *DevWorkspaceCondition) DeepCopy() *DevWorkspaceCondition {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceCondition)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceEvents) DeepCopyInto(out *DevWorkspaceEvents) {
	*out = *in
	if in.PreStart != nil {
		in, out := &in.PreStart, &out.PreStart
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.PostStart != nil {
		in, out := &in.PostStart, &out.PostStart
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.PreStop != nil {
		in, out := &in.PreStop, &out.PreStop
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.PostStop != nil {
		in, out := &in.PostStop, &out.PostStop
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceEvents.
func (in *DevWorkspaceEvents) DeepCopy() *DevWorkspaceEvents {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceEvents)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *DevWorkspaceList) DeepCopyInto(out *DevWorkspaceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DevWorkspace, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceList.
func (in *DevWorkspaceList) DeepCopy() *DevWorkspaceList {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DevWorkspaceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceSpec) DeepCopyInto(out *DevWorkspaceSpec) {
	*out = *in
	in.Template.DeepCopyInto(&out.Template)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceSpec.
func (in *DevWorkspaceSpec) DeepCopy() *DevWorkspaceSpec {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceStatus) DeepCopyInto(out *DevWorkspaceStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]DevWorkspaceCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceStatus.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *DevWorkspaceStatus) DeepCopy() *DevWorkspaceStatus {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceTemplate) DeepCopyInto(out *DevWorkspaceTemplate) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplate.
func (in *DevWorkspaceTemplate) DeepCopy() *DevWorkspaceTemplate {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceTemplate)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DevWorkspaceTemplate) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceTemplateList) DeepCopyInto(out *DevWorkspaceTemplateList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DevWorkspaceTemplate, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplateList.
func (in *DevWorkspaceTemplateList) DeepCopy() *DevWorkspaceTemplateList {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceTemplateList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// NOTE(review): autogenerated deepcopy boilerplate (vendored devfile/api); regenerate rather than hand-edit.
func (in *DevWorkspaceTemplateList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceTemplateSpec) DeepCopyInto(out *DevWorkspaceTemplateSpec) {
	*out = *in
	if in.Parent != nil {
		in, out := &in.Parent, &out.Parent
		*out = new(Parent)
		(*in).DeepCopyInto(*out)
	}
	in.DevWorkspaceTemplateSpecContent.DeepCopyInto(&out.DevWorkspaceTemplateSpecContent)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplateSpec.
func (in *DevWorkspaceTemplateSpec) DeepCopy() *DevWorkspaceTemplateSpec {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceTemplateSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DevWorkspaceTemplateSpecContent) DeepCopyInto(out *DevWorkspaceTemplateSpecContent) {
	*out = *in
	if in.Variables != nil {
		in, out := &in.Variables, &out.Variables
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.Components != nil {
		in, out := &in.Components, &out.Components
		*out = make([]Component, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Projects != nil {
		in, out := &in.Projects, &out.Projects
		*out = make([]Project, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StarterProjects != nil {
		in, out := &in.StarterProjects, &out.StarterProjects
		*out = make([]StarterProject, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Commands != nil {
		in, out := &in.Commands, &out.Commands
		*out = make([]Command, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Events != nil {
		in, out := &in.Events, &out.Events
		*out = new(Events)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplateSpecContent.
func (in *DevWorkspaceTemplateSpecContent) DeepCopy() *DevWorkspaceTemplateSpecContent {
	if in == nil {
		return nil
	}
	out := new(DevWorkspaceTemplateSpecContent)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
	*out = *in
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
func (in *Endpoint) DeepCopy() *Endpoint {
	if in == nil {
		return nil
	}
	out := new(Endpoint)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointParentOverride) DeepCopyInto(out *EndpointParentOverride) {
	*out = *in
	if in.Attributes != nil {
		in, out := &in.Attributes, &out.Attributes
		*out = make(attributes.Attributes, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParentOverride.
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *EndpointParentOverride) DeepCopy() *EndpointParentOverride { + if in == nil { + return nil + } + out := new(EndpointParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPluginOverride) DeepCopyInto(out *EndpointPluginOverride) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPluginOverride. +func (in *EndpointPluginOverride) DeepCopy() *EndpointPluginOverride { + if in == nil { + return nil + } + out := new(EndpointPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPluginOverrideParentOverride) DeepCopyInto(out *EndpointPluginOverrideParentOverride) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPluginOverrideParentOverride. +func (in *EndpointPluginOverrideParentOverride) DeepCopy() *EndpointPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(EndpointPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarParentOverride) DeepCopyInto(out *EnvVarParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarParentOverride. +func (in *EnvVarParentOverride) DeepCopy() *EnvVarParentOverride { + if in == nil { + return nil + } + out := new(EnvVarParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarPluginOverride) DeepCopyInto(out *EnvVarPluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarPluginOverride. +func (in *EnvVarPluginOverride) DeepCopy() *EnvVarPluginOverride { + if in == nil { + return nil + } + out := new(EnvVarPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarPluginOverrideParentOverride) DeepCopyInto(out *EnvVarPluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarPluginOverrideParentOverride. +func (in *EnvVarPluginOverrideParentOverride) DeepCopy() *EnvVarPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(EnvVarPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *Events) DeepCopyInto(out *Events) { + *out = *in + in.DevWorkspaceEvents.DeepCopyInto(&out.DevWorkspaceEvents) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Events. +func (in *Events) DeepCopy() *Events { + if in == nil { + return nil + } + out := new(Events) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCommand) DeepCopyInto(out *ExecCommand) { + *out = *in + in.LabeledCommand.DeepCopyInto(&out.LabeledCommand) + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommand. +func (in *ExecCommand) DeepCopy() *ExecCommand { + if in == nil { + return nil + } + out := new(ExecCommand) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCommandParentOverride) DeepCopyInto(out *ExecCommandParentOverride) { + *out = *in + in.LabeledCommandParentOverride.DeepCopyInto(&out.LabeledCommandParentOverride) + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVarParentOverride, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandParentOverride. +func (in *ExecCommandParentOverride) DeepCopy() *ExecCommandParentOverride { + if in == nil { + return nil + } + out := new(ExecCommandParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *ExecCommandPluginOverride) DeepCopyInto(out *ExecCommandPluginOverride) { + *out = *in + in.LabeledCommandPluginOverride.DeepCopyInto(&out.LabeledCommandPluginOverride) + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVarPluginOverride, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandPluginOverride. +func (in *ExecCommandPluginOverride) DeepCopy() *ExecCommandPluginOverride { + if in == nil { + return nil + } + out := new(ExecCommandPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCommandPluginOverrideParentOverride) DeepCopyInto(out *ExecCommandPluginOverrideParentOverride) { + *out = *in + in.LabeledCommandPluginOverrideParentOverride.DeepCopyInto(&out.LabeledCommandPluginOverrideParentOverride) + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVarPluginOverrideParentOverride, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandPluginOverrideParentOverride. +func (in *ExecCommandPluginOverrideParentOverride) DeepCopy() *ExecCommandPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(ExecCommandPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *GitLikeProjectSource) DeepCopyInto(out *GitLikeProjectSource) { + *out = *in + out.CommonProjectSource = in.CommonProjectSource + if in.CheckoutFrom != nil { + in, out := &in.CheckoutFrom, &out.CheckoutFrom + *out = new(CheckoutFrom) + **out = **in + } + if in.Remotes != nil { + in, out := &in.Remotes, &out.Remotes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSource. +func (in *GitLikeProjectSource) DeepCopy() *GitLikeProjectSource { + if in == nil { + return nil + } + out := new(GitLikeProjectSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLikeProjectSourceParentOverride) DeepCopyInto(out *GitLikeProjectSourceParentOverride) { + *out = *in + out.CommonProjectSourceParentOverride = in.CommonProjectSourceParentOverride + if in.CheckoutFrom != nil { + in, out := &in.CheckoutFrom, &out.CheckoutFrom + *out = new(CheckoutFromParentOverride) + **out = **in + } + if in.Remotes != nil { + in, out := &in.Remotes, &out.Remotes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourceParentOverride. +func (in *GitLikeProjectSourceParentOverride) DeepCopy() *GitLikeProjectSourceParentOverride { + if in == nil { + return nil + } + out := new(GitLikeProjectSourceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *GitProjectSource) DeepCopyInto(out *GitProjectSource) { + *out = *in + in.GitLikeProjectSource.DeepCopyInto(&out.GitLikeProjectSource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSource. +func (in *GitProjectSource) DeepCopy() *GitProjectSource { + if in == nil { + return nil + } + out := new(GitProjectSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitProjectSourceParentOverride) DeepCopyInto(out *GitProjectSourceParentOverride) { + *out = *in + in.GitLikeProjectSourceParentOverride.DeepCopyInto(&out.GitLikeProjectSourceParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourceParentOverride. +func (in *GitProjectSourceParentOverride) DeepCopy() *GitProjectSourceParentOverride { + if in == nil { + return nil + } + out := new(GitProjectSourceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportReference) DeepCopyInto(out *ImportReference) { + *out = *in + in.ImportReferenceUnion.DeepCopyInto(&out.ImportReferenceUnion) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReference. +func (in *ImportReference) DeepCopy() *ImportReference { + if in == nil { + return nil + } + out := new(ImportReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *ImportReferenceParentOverride) DeepCopyInto(out *ImportReferenceParentOverride) { + *out = *in + in.ImportReferenceUnionParentOverride.DeepCopyInto(&out.ImportReferenceUnionParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReferenceParentOverride. +func (in *ImportReferenceParentOverride) DeepCopy() *ImportReferenceParentOverride { + if in == nil { + return nil + } + out := new(ImportReferenceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportReferenceUnion) DeepCopyInto(out *ImportReferenceUnion) { + *out = *in + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesCustomResourceImportReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReferenceUnion. +func (in *ImportReferenceUnion) DeepCopy() *ImportReferenceUnion { + if in == nil { + return nil + } + out := new(ImportReferenceUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportReferenceUnionParentOverride) DeepCopyInto(out *ImportReferenceUnionParentOverride) { + *out = *in + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesCustomResourceImportReferenceParentOverride) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReferenceUnionParentOverride. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *ImportReferenceUnionParentOverride) DeepCopy() *ImportReferenceUnionParentOverride { + if in == nil { + return nil + } + out := new(ImportReferenceUnionParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sLikeComponent) DeepCopyInto(out *K8sLikeComponent) { + *out = *in + out.BaseComponent = in.BaseComponent + out.K8sLikeComponentLocation = in.K8sLikeComponentLocation + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponent. +func (in *K8sLikeComponent) DeepCopy() *K8sLikeComponent { + if in == nil { + return nil + } + out := new(K8sLikeComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sLikeComponentLocation) DeepCopyInto(out *K8sLikeComponentLocation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocation. +func (in *K8sLikeComponentLocation) DeepCopy() *K8sLikeComponentLocation { + if in == nil { + return nil + } + out := new(K8sLikeComponentLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sLikeComponentLocationParentOverride) DeepCopyInto(out *K8sLikeComponentLocationParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocationParentOverride. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *K8sLikeComponentLocationParentOverride) DeepCopy() *K8sLikeComponentLocationParentOverride { + if in == nil { + return nil + } + out := new(K8sLikeComponentLocationParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sLikeComponentLocationPluginOverride) DeepCopyInto(out *K8sLikeComponentLocationPluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocationPluginOverride. +func (in *K8sLikeComponentLocationPluginOverride) DeepCopy() *K8sLikeComponentLocationPluginOverride { + if in == nil { + return nil + } + out := new(K8sLikeComponentLocationPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sLikeComponentLocationPluginOverrideParentOverride) DeepCopyInto(out *K8sLikeComponentLocationPluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocationPluginOverrideParentOverride. +func (in *K8sLikeComponentLocationPluginOverrideParentOverride) DeepCopy() *K8sLikeComponentLocationPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(K8sLikeComponentLocationPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *K8sLikeComponentParentOverride) DeepCopyInto(out *K8sLikeComponentParentOverride) { + *out = *in + out.BaseComponentParentOverride = in.BaseComponentParentOverride + out.K8sLikeComponentLocationParentOverride = in.K8sLikeComponentLocationParentOverride + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]EndpointParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentParentOverride. +func (in *K8sLikeComponentParentOverride) DeepCopy() *K8sLikeComponentParentOverride { + if in == nil { + return nil + } + out := new(K8sLikeComponentParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sLikeComponentPluginOverride) DeepCopyInto(out *K8sLikeComponentPluginOverride) { + *out = *in + out.BaseComponentPluginOverride = in.BaseComponentPluginOverride + out.K8sLikeComponentLocationPluginOverride = in.K8sLikeComponentLocationPluginOverride + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]EndpointPluginOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentPluginOverride. +func (in *K8sLikeComponentPluginOverride) DeepCopy() *K8sLikeComponentPluginOverride { + if in == nil { + return nil + } + out := new(K8sLikeComponentPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *K8sLikeComponentPluginOverrideParentOverride) DeepCopyInto(out *K8sLikeComponentPluginOverrideParentOverride) { + *out = *in + out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride + out.K8sLikeComponentLocationPluginOverrideParentOverride = in.K8sLikeComponentLocationPluginOverrideParentOverride + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]EndpointPluginOverrideParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentPluginOverrideParentOverride. +func (in *K8sLikeComponentPluginOverrideParentOverride) DeepCopy() *K8sLikeComponentPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(K8sLikeComponentPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesComponent) DeepCopyInto(out *KubernetesComponent) { + *out = *in + in.K8sLikeComponent.DeepCopyInto(&out.K8sLikeComponent) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponent. +func (in *KubernetesComponent) DeepCopy() *KubernetesComponent { + if in == nil { + return nil + } + out := new(KubernetesComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesComponentParentOverride) DeepCopyInto(out *KubernetesComponentParentOverride) { + *out = *in + in.K8sLikeComponentParentOverride.DeepCopyInto(&out.K8sLikeComponentParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponentParentOverride. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *KubernetesComponentParentOverride) DeepCopy() *KubernetesComponentParentOverride { + if in == nil { + return nil + } + out := new(KubernetesComponentParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesComponentPluginOverride) DeepCopyInto(out *KubernetesComponentPluginOverride) { + *out = *in + in.K8sLikeComponentPluginOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponentPluginOverride. +func (in *KubernetesComponentPluginOverride) DeepCopy() *KubernetesComponentPluginOverride { + if in == nil { + return nil + } + out := new(KubernetesComponentPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesComponentPluginOverrideParentOverride) DeepCopyInto(out *KubernetesComponentPluginOverrideParentOverride) { + *out = *in + in.K8sLikeComponentPluginOverrideParentOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponentPluginOverrideParentOverride. +func (in *KubernetesComponentPluginOverrideParentOverride) DeepCopy() *KubernetesComponentPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(KubernetesComponentPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *KubernetesCustomResourceImportReference) DeepCopyInto(out *KubernetesCustomResourceImportReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesCustomResourceImportReference. +func (in *KubernetesCustomResourceImportReference) DeepCopy() *KubernetesCustomResourceImportReference { + if in == nil { + return nil + } + out := new(KubernetesCustomResourceImportReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesCustomResourceImportReferenceParentOverride) DeepCopyInto(out *KubernetesCustomResourceImportReferenceParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesCustomResourceImportReferenceParentOverride. +func (in *KubernetesCustomResourceImportReferenceParentOverride) DeepCopy() *KubernetesCustomResourceImportReferenceParentOverride { + if in == nil { + return nil + } + out := new(KubernetesCustomResourceImportReferenceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabeledCommand) DeepCopyInto(out *LabeledCommand) { + *out = *in + in.BaseCommand.DeepCopyInto(&out.BaseCommand) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommand. +func (in *LabeledCommand) DeepCopy() *LabeledCommand { + if in == nil { + return nil + } + out := new(LabeledCommand) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *LabeledCommandParentOverride) DeepCopyInto(out *LabeledCommandParentOverride) { + *out = *in + in.BaseCommandParentOverride.DeepCopyInto(&out.BaseCommandParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommandParentOverride. +func (in *LabeledCommandParentOverride) DeepCopy() *LabeledCommandParentOverride { + if in == nil { + return nil + } + out := new(LabeledCommandParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabeledCommandPluginOverride) DeepCopyInto(out *LabeledCommandPluginOverride) { + *out = *in + in.BaseCommandPluginOverride.DeepCopyInto(&out.BaseCommandPluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommandPluginOverride. +func (in *LabeledCommandPluginOverride) DeepCopy() *LabeledCommandPluginOverride { + if in == nil { + return nil + } + out := new(LabeledCommandPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabeledCommandPluginOverrideParentOverride) DeepCopyInto(out *LabeledCommandPluginOverrideParentOverride) { + *out = *in + in.BaseCommandPluginOverrideParentOverride.DeepCopyInto(&out.BaseCommandPluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommandPluginOverrideParentOverride. +func (in *LabeledCommandPluginOverrideParentOverride) DeepCopy() *LabeledCommandPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(LabeledCommandPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *OpenshiftComponent) DeepCopyInto(out *OpenshiftComponent) { + *out = *in + in.K8sLikeComponent.DeepCopyInto(&out.K8sLikeComponent) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponent. +func (in *OpenshiftComponent) DeepCopy() *OpenshiftComponent { + if in == nil { + return nil + } + out := new(OpenshiftComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenshiftComponentParentOverride) DeepCopyInto(out *OpenshiftComponentParentOverride) { + *out = *in + in.K8sLikeComponentParentOverride.DeepCopyInto(&out.K8sLikeComponentParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponentParentOverride. +func (in *OpenshiftComponentParentOverride) DeepCopy() *OpenshiftComponentParentOverride { + if in == nil { + return nil + } + out := new(OpenshiftComponentParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenshiftComponentPluginOverride) DeepCopyInto(out *OpenshiftComponentPluginOverride) { + *out = *in + in.K8sLikeComponentPluginOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponentPluginOverride. +func (in *OpenshiftComponentPluginOverride) DeepCopy() *OpenshiftComponentPluginOverride { + if in == nil { + return nil + } + out := new(OpenshiftComponentPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *OpenshiftComponentPluginOverrideParentOverride) DeepCopyInto(out *OpenshiftComponentPluginOverrideParentOverride) { + *out = *in + in.K8sLikeComponentPluginOverrideParentOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponentPluginOverrideParentOverride. +func (in *OpenshiftComponentPluginOverrideParentOverride) DeepCopy() *OpenshiftComponentPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(OpenshiftComponentPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideDirective) DeepCopyInto(out *OverrideDirective) { + *out = *in + if in.DeleteFromPrimitiveList != nil { + in, out := &in.DeleteFromPrimitiveList, &out.DeleteFromPrimitiveList + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SetElementOrder != nil { + in, out := &in.SetElementOrder, &out.SetElementOrder + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideDirective. +func (in *OverrideDirective) DeepCopy() *OverrideDirective { + if in == nil { + return nil + } + out := new(OverrideDirective) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesBase) DeepCopyInto(out *OverridesBase) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesBase. +func (in *OverridesBase) DeepCopy() *OverridesBase { + if in == nil { + return nil + } + out := new(OverridesBase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OverridesBaseParentOverride) DeepCopyInto(out *OverridesBaseParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesBaseParentOverride. +func (in *OverridesBaseParentOverride) DeepCopy() *OverridesBaseParentOverride { + if in == nil { + return nil + } + out := new(OverridesBaseParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Parent) DeepCopyInto(out *Parent) { + *out = *in + in.ImportReference.DeepCopyInto(&out.ImportReference) + in.ParentOverrides.DeepCopyInto(&out.ParentOverrides) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parent. +func (in *Parent) DeepCopy() *Parent { + if in == nil { + return nil + } + out := new(Parent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// NOTE(review): machine-generated deepcopy helpers (vendored devfile/api zz_generated.deepcopy.go) — do not edit by hand; update by regenerating/re-vendoring upstream.
+func (in *ParentOverrides) DeepCopyInto(out *ParentOverrides) { + *out = *in + out.OverridesBase = in.OverridesBase + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ComponentParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Projects != nil { + in, out := &in.Projects, &out.Projects + *out = make([]ProjectParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StarterProjects != nil { + in, out := &in.StarterProjects, &out.StarterProjects + *out = make([]StarterProjectParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]CommandParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentOverrides. +func (in *ParentOverrides) DeepCopy() *ParentOverrides { + if in == nil { + return nil + } + out := new(ParentOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginComponent) DeepCopyInto(out *PluginComponent) { + *out = *in + out.BaseComponent = in.BaseComponent + in.ImportReference.DeepCopyInto(&out.ImportReference) + in.PluginOverrides.DeepCopyInto(&out.PluginOverrides) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginComponent. 
+func (in *PluginComponent) DeepCopy() *PluginComponent { + if in == nil { + return nil + } + out := new(PluginComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginComponentParentOverride) DeepCopyInto(out *PluginComponentParentOverride) { + *out = *in + out.BaseComponentParentOverride = in.BaseComponentParentOverride + in.ImportReferenceParentOverride.DeepCopyInto(&out.ImportReferenceParentOverride) + in.PluginOverridesParentOverride.DeepCopyInto(&out.PluginOverridesParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginComponentParentOverride. +func (in *PluginComponentParentOverride) DeepCopy() *PluginComponentParentOverride { + if in == nil { + return nil + } + out := new(PluginComponentParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginOverrides) DeepCopyInto(out *PluginOverrides) { + *out = *in + out.OverridesBase = in.OverridesBase + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ComponentPluginOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]CommandPluginOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginOverrides. +func (in *PluginOverrides) DeepCopy() *PluginOverrides { + if in == nil { + return nil + } + out := new(PluginOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PluginOverridesParentOverride) DeepCopyInto(out *PluginOverridesParentOverride) { + *out = *in + out.OverridesBaseParentOverride = in.OverridesBaseParentOverride + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ComponentPluginOverrideParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]CommandPluginOverrideParentOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginOverridesParentOverride. +func (in *PluginOverridesParentOverride) DeepCopy() *PluginOverridesParentOverride { + if in == nil { + return nil + } + out := new(PluginOverridesParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.ProjectSource.DeepCopyInto(&out.ProjectSource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectParentOverride) DeepCopyInto(out *ProjectParentOverride) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.ProjectSourceParentOverride.DeepCopyInto(&out.ProjectSourceParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectParentOverride. +func (in *ProjectParentOverride) DeepCopy() *ProjectParentOverride { + if in == nil { + return nil + } + out := new(ProjectParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSource) DeepCopyInto(out *ProjectSource) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitProjectSource) + (*in).DeepCopyInto(*out) + } + if in.Zip != nil { + in, out := &in.Zip, &out.Zip + *out = new(ZipProjectSource) + **out = **in + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomProjectSource) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSource. +func (in *ProjectSource) DeepCopy() *ProjectSource { + if in == nil { + return nil + } + out := new(ProjectSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectSourceParentOverride) DeepCopyInto(out *ProjectSourceParentOverride) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitProjectSourceParentOverride) + (*in).DeepCopyInto(*out) + } + if in.Zip != nil { + in, out := &in.Zip, &out.Zip + *out = new(ZipProjectSourceParentOverride) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSourceParentOverride. +func (in *ProjectSourceParentOverride) DeepCopy() *ProjectSourceParentOverride { + if in == nil { + return nil + } + out := new(ProjectSourceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StarterProject) DeepCopyInto(out *StarterProject) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.ProjectSource.DeepCopyInto(&out.ProjectSource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarterProject. +func (in *StarterProject) DeepCopy() *StarterProject { + if in == nil { + return nil + } + out := new(StarterProject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StarterProjectParentOverride) DeepCopyInto(out *StarterProjectParentOverride) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.ProjectSourceParentOverride.DeepCopyInto(&out.ProjectSourceParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarterProjectParentOverride. 
+func (in *StarterProjectParentOverride) DeepCopy() *StarterProjectParentOverride { + if in == nil { + return nil + } + out := new(StarterProjectParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeComponent) DeepCopyInto(out *VolumeComponent) { + *out = *in + out.BaseComponent = in.BaseComponent + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponent. +func (in *VolumeComponent) DeepCopy() *VolumeComponent { + if in == nil { + return nil + } + out := new(VolumeComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeComponentParentOverride) DeepCopyInto(out *VolumeComponentParentOverride) { + *out = *in + out.BaseComponentParentOverride = in.BaseComponentParentOverride + out.VolumeParentOverride = in.VolumeParentOverride +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentParentOverride. +func (in *VolumeComponentParentOverride) DeepCopy() *VolumeComponentParentOverride { + if in == nil { + return nil + } + out := new(VolumeComponentParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeComponentPluginOverride) DeepCopyInto(out *VolumeComponentPluginOverride) { + *out = *in + out.BaseComponentPluginOverride = in.BaseComponentPluginOverride + out.VolumePluginOverride = in.VolumePluginOverride +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentPluginOverride. +func (in *VolumeComponentPluginOverride) DeepCopy() *VolumeComponentPluginOverride { + if in == nil { + return nil + } + out := new(VolumeComponentPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeComponentPluginOverrideParentOverride) DeepCopyInto(out *VolumeComponentPluginOverrideParentOverride) { + *out = *in + out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride + out.VolumePluginOverrideParentOverride = in.VolumePluginOverrideParentOverride +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentPluginOverrideParentOverride. +func (in *VolumeComponentPluginOverrideParentOverride) DeepCopy() *VolumeComponentPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(VolumeComponentPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeMountParentOverride) DeepCopyInto(out *VolumeMountParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountParentOverride. +func (in *VolumeMountParentOverride) DeepCopy() *VolumeMountParentOverride { + if in == nil { + return nil + } + out := new(VolumeMountParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountPluginOverride) DeepCopyInto(out *VolumeMountPluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountPluginOverride. +func (in *VolumeMountPluginOverride) DeepCopy() *VolumeMountPluginOverride { + if in == nil { + return nil + } + out := new(VolumeMountPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountPluginOverrideParentOverride) DeepCopyInto(out *VolumeMountPluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountPluginOverrideParentOverride. +func (in *VolumeMountPluginOverrideParentOverride) DeepCopy() *VolumeMountPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(VolumeMountPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeParentOverride) DeepCopyInto(out *VolumeParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeParentOverride. 
+func (in *VolumeParentOverride) DeepCopy() *VolumeParentOverride { + if in == nil { + return nil + } + out := new(VolumeParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumePluginOverride) DeepCopyInto(out *VolumePluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumePluginOverride. +func (in *VolumePluginOverride) DeepCopy() *VolumePluginOverride { + if in == nil { + return nil + } + out := new(VolumePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumePluginOverrideParentOverride) DeepCopyInto(out *VolumePluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumePluginOverrideParentOverride. +func (in *VolumePluginOverrideParentOverride) DeepCopy() *VolumePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(VolumePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZipProjectSource) DeepCopyInto(out *ZipProjectSource) { + *out = *in + out.CommonProjectSource = in.CommonProjectSource +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZipProjectSource. +func (in *ZipProjectSource) DeepCopy() *ZipProjectSource { + if in == nil { + return nil + } + out := new(ZipProjectSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZipProjectSourceParentOverride) DeepCopyInto(out *ZipProjectSourceParentOverride) { + *out = *in + out.CommonProjectSourceParentOverride = in.CommonProjectSourceParentOverride +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZipProjectSourceParentOverride. +func (in *ZipProjectSourceParentOverride) DeepCopy() *ZipProjectSourceParentOverride { + if in == nil { + return nil + } + out := new(ZipProjectSourceParentOverride) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.keyed_definitions.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.keyed_definitions.go new file mode 100644 index 000000000..27a85e013 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.keyed_definitions.go @@ -0,0 +1,49 @@ +package v1alpha2 + +func (keyed Component) Key() string { + return keyed.Name +} + +func (keyed Project) Key() string { + return keyed.Name +} + +func (keyed StarterProject) Key() string { + return keyed.Name +} + +func (keyed Command) Key() string { + return keyed.Id +} + +func (keyed ComponentParentOverride) Key() string { + return keyed.Name +} + +func (keyed ProjectParentOverride) Key() string { + return keyed.Name +} + +func (keyed StarterProjectParentOverride) Key() string { + return keyed.Name +} + +func (keyed CommandParentOverride) Key() string { + return keyed.Id +} + +func (keyed ComponentPluginOverrideParentOverride) Key() string { + return keyed.Name +} + +func (keyed CommandPluginOverrideParentOverride) Key() string { + return keyed.Id +} + +func (keyed ComponentPluginOverride) Key() string { + return keyed.Name +} + +func (keyed CommandPluginOverride) Key() string { + return keyed.Id +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go 
b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go new file mode 100644 index 000000000..e24b2d49d --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go @@ -0,0 +1,1143 @@ +package v1alpha2 + +import ( + attributes "github.com/devfile/api/v2/pkg/attributes" +) + +// +devfile:jsonschema:generate +type ParentOverrides struct { + OverridesBase `json:",inline"` + + // Overrides of variables encapsulated in a parent devfile. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchStrategy=merge + Variables map[string]string `json:"variables,omitempty" patchStrategy:"merge"` + + // Overrides of attributes encapsulated in a parent devfile. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchStrategy=merge + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty" patchStrategy:"merge"` + + // Overrides of components encapsulated in a parent devfile or a plugin. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:toplevellist + Components []ComponentParentOverride `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // Overrides of projects encapsulated in a parent devfile. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:toplevellist + Projects []ProjectParentOverride `json:"projects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // Overrides of starterProjects encapsulated in a parent devfile. + // Overriding is done according to K8S strategic merge patch standard rules. 
+ // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:toplevellist + StarterProjects []StarterProjectParentOverride `json:"starterProjects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // Overrides of commands encapsulated in a parent devfile or a plugin. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchMergeKey=id + // +patchStrategy=merge + // +devfile:toplevellist + Commands []CommandParentOverride `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` +} + +//+k8s:openapi-gen=true +type ComponentParentOverride struct { + + // Mandatory name that allows referencing the component + // from other elements (such as commands) or from an external + // devfile that may reference this component through a parent or a plugin. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + ComponentUnionParentOverride `json:",inline"` +} + +type ProjectParentOverride struct { + + // Project name + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + + // Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). 
The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name. + // +optional + ClonePath string `json:"clonePath,omitempty"` + + ProjectSourceParentOverride `json:",inline"` +} + +type StarterProjectParentOverride struct { + + // Project name + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + + // Description of a starter project + // +optional + Description string `json:"description,omitempty"` + + // Sub-directory from a starter project to be used as root for starter project. + // +optional + SubDir string `json:"subDir,omitempty"` + + ProjectSourceParentOverride `json:",inline"` +} + +type CommandParentOverride struct { + + // Mandatory identifier that allows referencing + // this command in composite commands, from + // a parent, or in events. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Id string `json:"id"` + + // Map of implementation-dependant free-form YAML attributes. 
+ // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + CommandUnionParentOverride `json:",inline"` +} + +// +union +type ComponentUnionParentOverride struct { + + // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Plugin + // Type of component + // + // +unionDiscriminator + // +optional + ComponentType ComponentTypeParentOverride `json:"componentType,omitempty"` + + // Allows adding and configuring devworkspace-related containers + // +optional + Container *ContainerComponentParentOverride `json:"container,omitempty"` + + // Allows importing into the devworkspace the Kubernetes resources + // defined in a given manifest. For example this allows reusing the Kubernetes + // definitions used to deploy some runtime components in production. + // + // +optional + Kubernetes *KubernetesComponentParentOverride `json:"kubernetes,omitempty"` + + // Allows importing into the devworkspace the OpenShift resources + // defined in a given manifest. For example this allows reusing the OpenShift + // definitions used to deploy some runtime components in production. + // + // +optional + Openshift *OpenshiftComponentParentOverride `json:"openshift,omitempty"` + + // Allows specifying the definition of a volume + // shared by several other components + // +optional + Volume *VolumeComponentParentOverride `json:"volume,omitempty"` + + // Allows importing a plugin. + // + // Plugins are mainly imported devfiles that contribute components, commands + // and events as a consistent single unit. 
They are defined in either YAML files + // following the devfile syntax, + // or as `DevWorkspaceTemplate` Kubernetes Custom Resources + // +optional + // +devfile:overrides:include:omitInPlugin=true + Plugin *PluginComponentParentOverride `json:"plugin,omitempty"` +} + +// +union +type ProjectSourceParentOverride struct { + + // +kubebuilder:validation:Enum=Git;Zip + // Type of project source + // + + // +unionDiscriminator + // +optional + SourceType ProjectSourceTypeParentOverride `json:"sourceType,omitempty"` + + // Project's Git source + // +optional + Git *GitProjectSourceParentOverride `json:"git,omitempty"` + + // Project's Zip source + // +optional + Zip *ZipProjectSourceParentOverride `json:"zip,omitempty"` +} + +// +union +type CommandUnionParentOverride struct { + + // +kubebuilder:validation:Enum=Exec;Apply;Composite + // Type of devworkspace command + // +unionDiscriminator + // +optional + CommandType CommandTypeParentOverride `json:"commandType,omitempty"` + + // CLI Command executed in an existing component container + // +optional + Exec *ExecCommandParentOverride `json:"exec,omitempty"` + + // Command that consists in applying a given component definition, + // typically bound to a devworkspace event. + // + // For example, when an `apply` command is bound to a `preStart` event, + // and references a `container` component, it will start the container as a + // K8S initContainer in the devworkspace POD, unless the component has its + // `dedicatedPod` field set to `true`. + // + // When no `apply` command exist for a given component, + // it is assumed the component will be applied at devworkspace start + // by default. + // +optional + Apply *ApplyCommandParentOverride `json:"apply,omitempty"` + + // Composite command that allows executing several sub-commands + // either sequentially or concurrently + // +optional + Composite *CompositeCommandParentOverride `json:"composite,omitempty"` +} + +// ComponentType describes the type of component. 
+// Only one of the following component type may be specified. +type ComponentTypeParentOverride string + +// Component that allows the developer to add a configured container into their devworkspace +type ContainerComponentParentOverride struct { + BaseComponentParentOverride `json:",inline"` + ContainerParentOverride `json:",inline"` + Endpoints []EndpointParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Component that allows partly importing Kubernetes resources into the devworkspace POD +type KubernetesComponentParentOverride struct { + K8sLikeComponentParentOverride `json:",inline"` +} + +// Component that allows partly importing Openshift resources into the devworkspace POD +type OpenshiftComponentParentOverride struct { + K8sLikeComponentParentOverride `json:",inline"` +} + +// Component that allows the developer to declare and configure a volume into their devworkspace +type VolumeComponentParentOverride struct { + BaseComponentParentOverride `json:",inline"` + VolumeParentOverride `json:",inline"` +} + +type PluginComponentParentOverride struct { + BaseComponentParentOverride `json:",inline"` + ImportReferenceParentOverride `json:",inline"` + PluginOverridesParentOverride `json:",inline"` +} + +// ProjectSourceType describes the type of Project sources. +// Only one of the following project sources may be specified. +// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ProjectSourceTypeParentOverride string + +type GitProjectSourceParentOverride struct { + GitLikeProjectSourceParentOverride `json:",inline"` +} + +type ZipProjectSourceParentOverride struct { + CommonProjectSourceParentOverride `json:",inline"` + + // Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH + // +required + Location string `json:"location,omitempty"` +} + +// CommandType describes the type of command. 
+// Only one of the following command type may be specified. +type CommandTypeParentOverride string + +type ExecCommandParentOverride struct { + LabeledCommandParentOverride `json:",inline"` + + // +optional + // The actual command-line string + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. + CommandLine string `json:"commandLine,omitempty"` + + // +optional + // Describes component to which given action relates + // + Component string `json:"component,omitempty"` + + // Working directory where the command should be executed + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. + // +optional + WorkingDir string `json:"workingDir,omitempty"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Optional list of environment variables that have to be set + // before running the command + Env []EnvVarParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // Whether the command is capable to reload itself when source code changes. + // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. 
+ // + // Default value is `false` + HotReloadCapable bool `json:"hotReloadCapable,omitempty"` +} + +type ApplyCommandParentOverride struct { + LabeledCommandParentOverride `json:",inline"` + + // +optional + // Describes component that will be applied + // + Component string `json:"component,omitempty"` +} + +type CompositeCommandParentOverride struct { + LabeledCommandParentOverride `json:",inline"` + + // The commands that comprise this composite command + Commands []string `json:"commands,omitempty" patchStrategy:"replace"` + + // Indicates if the sub-commands should be executed concurrently + // +optional + Parallel bool `json:"parallel,omitempty"` +} + +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. +type BaseComponentParentOverride struct { +} + +type ContainerParentOverride struct { + // +optional + Image string `json:"image,omitempty"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Environment variables used in this container. + // + // The following variables are reserved and cannot be overridden via env: + // + // - `$PROJECTS_ROOT` + // + // - `$PROJECT_SOURCE` + Env []EnvVarParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // List of volumes mounts that should be mounted is this container. + VolumeMounts []VolumeMountParentOverride `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + MemoryLimit string `json:"memoryLimit,omitempty"` + + // +optional + MemoryRequest string `json:"memoryRequest,omitempty"` + + // +optional + CpuLimit string `json:"cpuLimit,omitempty"` + + // +optional + CpuRequest string `json:"cpuRequest,omitempty"` + + // The command to run in the dockerimage component instead of the default one provided in the image. + // + // Defaults to an empty array, meaning use whatever is defined in the image. 
+ // +optional + Command []string `json:"command,omitempty" patchStrategy:"replace"` + + // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. + // + // Defaults to an empty array, meaning use whatever is defined in the image. + // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Toggles whether or not the project source code should + // be mounted in the component. + // + // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. + // +optional + MountSources *bool `json:"mountSources,omitempty"` + + // Optional specification of the path in the container where + // project sources should be transferred/mounted when `mountSources` is `true`. + // When omitted, the default value of /projects is used. + // +optional + SourceMapping string `json:"sourceMapping,omitempty"` + + // Specify if a container should run in its own separated pod, + // instead of running as part of the main development environment pod. + // + // Default value is `false` + // +optional + DedicatedPod bool `json:"dedicatedPod,omitempty"` +} + +type EndpointParentOverride struct { + + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // +optional + TargetPort int `json:"targetPort,omitempty"` + + // Describes how the endpoint should be exposed on the network. + // + // - `public` means that the endpoint will be exposed on the public network, typically through + // a K8S ingress or an OpenShift route. + // + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, + // typically by K8S services, to be consumed by other elements running + // on the same cloud internal network. 
+ // + // - `none` means that the endpoint will not be exposed and will only be accessible + // inside the main devworkspace POD, on a local address. + // + // Default value is `public` + // +optional + Exposure EndpointExposureParentOverride `json:"exposure,omitempty"` + + // Describes the application and transport protocols of the traffic that will go through this endpoint. + // + // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. + // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. + // + // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. + // + // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. + // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. + // + // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. + // + // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. + // + // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. + // + // Default value is `http` + // +optional + Protocol EndpointProtocolParentOverride `json:"protocol,omitempty"` + + // Describes whether the endpoint should be secured and protected by some + // authentication process. This requires a protocol of `https` or `wss`. + // +optional + Secure bool `json:"secure,omitempty"` + + // Path of the endpoint URL + // +optional + Path string `json:"path,omitempty"` + + // Map of implementation-dependant string-based free-form attributes. 
+ // + // Examples of Che-specific attributes: + // + // - cookiesAuthEnabled: "true" / "false", + // + // - type: "terminal" / "ide" / "ide-dev", + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` +} + +type K8sLikeComponentParentOverride struct { + BaseComponentParentOverride `json:",inline"` + K8sLikeComponentLocationParentOverride `json:",inline"` + Endpoints []EndpointParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Volume that should be mounted to a component container +type VolumeParentOverride struct { + + // +optional + // Size of the volume + Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. Defaults + // to false + Ephemeral bool `json:"ephemeral,omitempty"` +} + +type ImportReferenceParentOverride struct { + ImportReferenceUnionParentOverride `json:",inline"` + + // Registry URL to pull the parent devfile from when using id in the parent reference. + // To ensure the parent devfile gets resolved consistently in different environments, + // it is recommended to always specify the `regsitryURL` when `Id` is used. + // +optional + RegistryUrl string `json:"registryUrl,omitempty"` +} + +type PluginOverridesParentOverride struct { + OverridesBaseParentOverride `json:",inline"` + + // Overrides of components encapsulated in a parent devfile or a plugin. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:toplevellist + Components []ComponentPluginOverrideParentOverride `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // Overrides of commands encapsulated in a parent devfile or a plugin. 
+ // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchMergeKey=id + // +patchStrategy=merge + // +devfile:toplevellist + Commands []CommandPluginOverrideParentOverride `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` +} + +type GitLikeProjectSourceParentOverride struct { + CommonProjectSourceParentOverride `json:",inline"` + + // Defines from what the project should be checked out. Required if there are more than one remote configured + // +optional + CheckoutFrom *CheckoutFromParentOverride `json:"checkoutFrom,omitempty"` + + // +optional + // The remotes map which should be initialized in the git project. Must have at least one remote configured + Remotes map[string]string `json:"remotes,omitempty"` +} + +type CommonProjectSourceParentOverride struct { +} + +type LabeledCommandParentOverride struct { + BaseCommandParentOverride `json:",inline"` + + // +optional + // Optional label that provides a label for this command + // to be used in Editor UI menus for example + Label string `json:"label,omitempty"` +} + +type EnvVarParentOverride struct { + Name string `json:"name" yaml:"name"` + // +optional + Value string `json:"value,omitempty" yaml:"value"` +} + +// Volume that should be mounted to a component container +type VolumeMountParentOverride struct { + + // The volume mount name is the name of an existing `Volume` component. + // If several containers mount the same volume name + // then they will reuse the same volume and will be able to access to the same files. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // The path in the component container where the volume should be mounted. + // If not path is mentioned, default path is the is `/`. + // +optional + Path string `json:"path,omitempty"` +} + +// EndpointExposure describes the way an endpoint is exposed on the network. 
+// Only one of the following exposures may be specified: public, internal, none. +// +kubebuilder:validation:Enum=public;internal;none +type EndpointExposureParentOverride string + +// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. +// Only one of the following protocols may be specified: http, ws, tcp, udp. +// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp +type EndpointProtocolParentOverride string + +// +union +type K8sLikeComponentLocationParentOverride struct { + + // +kubebuilder:validation:Enum=Uri;Inlined + // Type of Kubernetes-like location + // + + // +unionDiscriminator + // +optional + LocationType K8sLikeComponentLocationTypeParentOverride `json:"locationType,omitempty"` + + // Location in a file fetched from a uri. + // +optional + Uri string `json:"uri,omitempty"` + + // Inlined manifest + // +optional + Inlined string `json:"inlined,omitempty"` +} + +// Location from where the an import reference is retrieved +// +union +type ImportReferenceUnionParentOverride struct { + + // +kubebuilder:validation:Enum=Uri;Id;Kubernetes + // type of location from where the referenced template structure should be retrieved + // + + // +unionDiscriminator + // +optional + ImportReferenceType ImportReferenceTypeParentOverride `json:"importReferenceType,omitempty"` + + // URI Reference of a parent devfile YAML file. + // It can be a full URL or a relative URI with the current devfile as the base URI. 
+ // +optional + Uri string `json:"uri,omitempty"` + + // Id in a registry that contains a Devfile yaml file + // +optional + Id string `json:"id,omitempty"` + + // Reference to a Kubernetes CRD of type DevWorkspaceTemplate + // +optional + Kubernetes *KubernetesCustomResourceImportReferenceParentOverride `json:"kubernetes,omitempty"` +} + +// OverridesBase is used in the Overrides generator in order to provide a common base for the generated Overrides +// So please be careful when renaming +type OverridesBaseParentOverride struct{} + +//+k8s:openapi-gen=true +type ComponentPluginOverrideParentOverride struct { + + // Mandatory name that allows referencing the component + // from other elements (such as commands) or from an external + // devfile that may reference this component through a parent or a plugin. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + ComponentUnionPluginOverrideParentOverride `json:",inline"` +} + +type CommandPluginOverrideParentOverride struct { + + // Mandatory identifier that allows referencing + // this command in composite commands, from + // a parent, or in events. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Id string `json:"id"` + + // Map of implementation-dependant free-form YAML attributes. 
+ // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + CommandUnionPluginOverrideParentOverride `json:",inline"` +} + +type CheckoutFromParentOverride struct { + + // The revision to checkout from. Should be branch name, tag or commit id. + // Default branch is used if missing or specified revision is not found. + // +optional + Revision string `json:"revision,omitempty"` + + // The remote name should be used as init. Required if there are more than one remote configured + // +optional + Remote string `json:"remote,omitempty"` +} + +type BaseCommandParentOverride struct { + + // +optional + // Defines the group this command is part of + Group *CommandGroupParentOverride `json:"group,omitempty"` +} + +// K8sLikeComponentLocationType describes the type of +// the location the configuration is fetched from. +// Only one of the following component type may be specified. +type K8sLikeComponentLocationTypeParentOverride string + +// ImportReferenceType describes the type of location +// from where the referenced template structure should be retrieved. +// Only one of the following parent locations may be specified. 
+type ImportReferenceTypeParentOverride string + +type KubernetesCustomResourceImportReferenceParentOverride struct { + // +optional + Name string `json:"name,omitempty"` + + // +optional + Namespace string `json:"namespace,omitempty"` +} + +// +union +type ComponentUnionPluginOverrideParentOverride struct { + + // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume + // Type of component + // + // +unionDiscriminator + // +optional + ComponentType ComponentTypePluginOverrideParentOverride `json:"componentType,omitempty"` + + // Allows adding and configuring devworkspace-related containers + // +optional + Container *ContainerComponentPluginOverrideParentOverride `json:"container,omitempty"` + + // Allows importing into the devworkspace the Kubernetes resources + // defined in a given manifest. For example this allows reusing the Kubernetes + // definitions used to deploy some runtime components in production. + // + // +optional + Kubernetes *KubernetesComponentPluginOverrideParentOverride `json:"kubernetes,omitempty"` + + // Allows importing into the devworkspace the OpenShift resources + // defined in a given manifest. For example this allows reusing the OpenShift + // definitions used to deploy some runtime components in production. 
+ // + // +optional + Openshift *OpenshiftComponentPluginOverrideParentOverride `json:"openshift,omitempty"` + + // Allows specifying the definition of a volume + // shared by several other components + // +optional + Volume *VolumeComponentPluginOverrideParentOverride `json:"volume,omitempty"` +} + +// +union +type CommandUnionPluginOverrideParentOverride struct { + + // +kubebuilder:validation:Enum=Exec;Apply;Composite + // Type of devworkspace command + // +unionDiscriminator + // +optional + CommandType CommandTypePluginOverrideParentOverride `json:"commandType,omitempty"` + + // CLI Command executed in an existing component container + // +optional + Exec *ExecCommandPluginOverrideParentOverride `json:"exec,omitempty"` + + // Command that consists in applying a given component definition, + // typically bound to a devworkspace event. + // + // For example, when an `apply` command is bound to a `preStart` event, + // and references a `container` component, it will start the container as a + // K8S initContainer in the devworkspace POD, unless the component has its + // `dedicatedPod` field set to `true`. + // + // When no `apply` command exist for a given component, + // it is assumed the component will be applied at devworkspace start + // by default. + // +optional + Apply *ApplyCommandPluginOverrideParentOverride `json:"apply,omitempty"` + + // Composite command that allows executing several sub-commands + // either sequentially or concurrently + // +optional + Composite *CompositeCommandPluginOverrideParentOverride `json:"composite,omitempty"` +} + +type CommandGroupParentOverride struct { + + // +optional + // Kind of group the command is part of + Kind CommandGroupKindParentOverride `json:"kind,omitempty"` + + // +optional + // Identifies the default command for a given group kind + IsDefault bool `json:"isDefault,omitempty"` +} + +// ComponentType describes the type of component. +// Only one of the following component type may be specified. 
+type ComponentTypePluginOverrideParentOverride string + +// Component that allows the developer to add a configured container into their devworkspace +type ContainerComponentPluginOverrideParentOverride struct { + BaseComponentPluginOverrideParentOverride `json:",inline"` + ContainerPluginOverrideParentOverride `json:",inline"` + Endpoints []EndpointPluginOverrideParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Component that allows partly importing Kubernetes resources into the devworkspace POD +type KubernetesComponentPluginOverrideParentOverride struct { + K8sLikeComponentPluginOverrideParentOverride `json:",inline"` +} + +// Component that allows partly importing Openshift resources into the devworkspace POD +type OpenshiftComponentPluginOverrideParentOverride struct { + K8sLikeComponentPluginOverrideParentOverride `json:",inline"` +} + +// Component that allows the developer to declare and configure a volume into their devworkspace +type VolumeComponentPluginOverrideParentOverride struct { + BaseComponentPluginOverrideParentOverride `json:",inline"` + VolumePluginOverrideParentOverride `json:",inline"` +} + +// CommandType describes the type of command. +// Only one of the following command type may be specified. +type CommandTypePluginOverrideParentOverride string + +type ExecCommandPluginOverrideParentOverride struct { + LabeledCommandPluginOverrideParentOverride `json:",inline"` + + // +optional + // The actual command-line string + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. 
+ CommandLine string `json:"commandLine,omitempty"` + + // +optional + // Describes component to which given action relates + // + Component string `json:"component,omitempty"` + + // Working directory where the command should be executed + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. + // +optional + WorkingDir string `json:"workingDir,omitempty"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Optional list of environment variables that have to be set + // before running the command + Env []EnvVarPluginOverrideParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // Whether the command is capable to reload itself when source code changes. + // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. + // + // Default value is `false` + HotReloadCapable bool `json:"hotReloadCapable,omitempty"` +} + +type ApplyCommandPluginOverrideParentOverride struct { + LabeledCommandPluginOverrideParentOverride `json:",inline"` + + // +optional + // Describes component that will be applied + // + Component string `json:"component,omitempty"` +} + +type CompositeCommandPluginOverrideParentOverride struct { + LabeledCommandPluginOverrideParentOverride `json:",inline"` + + // The commands that comprise this composite command + Commands []string `json:"commands,omitempty" patchStrategy:"replace"` + + // Indicates if the sub-commands should be executed concurrently + // +optional + Parallel bool `json:"parallel,omitempty"` +} + +// CommandGroupKind describes the kind of command group. 
+// +kubebuilder:validation:Enum=build;run;test;debug +type CommandGroupKindParentOverride string + +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. +type BaseComponentPluginOverrideParentOverride struct { +} + +type ContainerPluginOverrideParentOverride struct { + + // +optional + Image string `json:"image,omitempty"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Environment variables used in this container. + // + // The following variables are reserved and cannot be overridden via env: + // + // - `$PROJECTS_ROOT` + // + // - `$PROJECT_SOURCE` + Env []EnvVarPluginOverrideParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // List of volumes mounts that should be mounted is this container. + VolumeMounts []VolumeMountPluginOverrideParentOverride `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + MemoryLimit string `json:"memoryLimit,omitempty"` + + // +optional + MemoryRequest string `json:"memoryRequest,omitempty"` + + // +optional + CpuLimit string `json:"cpuLimit,omitempty"` + + // +optional + CpuRequest string `json:"cpuRequest,omitempty"` + + // The command to run in the dockerimage component instead of the default one provided in the image. + // + // Defaults to an empty array, meaning use whatever is defined in the image. + // +optional + Command []string `json:"command,omitempty" patchStrategy:"replace"` + + // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. + // + // Defaults to an empty array, meaning use whatever is defined in the image. 
+ // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Toggles whether or not the project source code should + // be mounted in the component. + // + // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. + // +optional + MountSources *bool `json:"mountSources,omitempty"` + + // Optional specification of the path in the container where + // project sources should be transferred/mounted when `mountSources` is `true`. + // When omitted, the default value of /projects is used. + // +optional + SourceMapping string `json:"sourceMapping,omitempty"` + + // Specify if a container should run in its own separated pod, + // instead of running as part of the main development environment pod. + // + // Default value is `false` + // +optional + DedicatedPod bool `json:"dedicatedPod,omitempty"` +} + +type EndpointPluginOverrideParentOverride struct { + + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // +optional + TargetPort int `json:"targetPort,omitempty"` + + // Describes how the endpoint should be exposed on the network. + // + // - `public` means that the endpoint will be exposed on the public network, typically through + // a K8S ingress or an OpenShift route. + // + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, + // typically by K8S services, to be consumed by other elements running + // on the same cloud internal network. + // + // - `none` means that the endpoint will not be exposed and will only be accessible + // inside the main devworkspace POD, on a local address. + // + // Default value is `public` + // +optional + Exposure EndpointExposurePluginOverrideParentOverride `json:"exposure,omitempty"` + + // Describes the application and transport protocols of the traffic that will go through this endpoint. 
+ // + // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. + // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. + // + // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. + // + // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. + // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. + // + // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. + // + // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. + // + // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. + // + // Default value is `http` + // +optional + Protocol EndpointProtocolPluginOverrideParentOverride `json:"protocol,omitempty"` + + // Describes whether the endpoint should be secured and protected by some + // authentication process. This requires a protocol of `https` or `wss`. + // +optional + Secure bool `json:"secure,omitempty"` + + // Path of the endpoint URL + // +optional + Path string `json:"path,omitempty"` + + // Map of implementation-dependant string-based free-form attributes. 
+ // + // Examples of Che-specific attributes: + // + // - cookiesAuthEnabled: "true" / "false", + // + // - type: "terminal" / "ide" / "ide-dev", + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` +} + +type K8sLikeComponentPluginOverrideParentOverride struct { + BaseComponentPluginOverrideParentOverride `json:",inline"` + K8sLikeComponentLocationPluginOverrideParentOverride `json:",inline"` + Endpoints []EndpointPluginOverrideParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Volume that should be mounted to a component container +type VolumePluginOverrideParentOverride struct { + + // +optional + // Size of the volume + Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. Defaults + // to false + Ephemeral bool `json:"ephemeral,omitempty"` +} + +type LabeledCommandPluginOverrideParentOverride struct { + BaseCommandPluginOverrideParentOverride `json:",inline"` + + // +optional + // Optional label that provides a label for this command + // to be used in Editor UI menus for example + Label string `json:"label,omitempty"` +} + +type EnvVarPluginOverrideParentOverride struct { + Name string `json:"name" yaml:"name"` + + // +optional + Value string `json:"value,omitempty" yaml:"value"` +} + +// Volume that should be mounted to a component container +type VolumeMountPluginOverrideParentOverride struct { + + // The volume mount name is the name of an existing `Volume` component. + // If several containers mount the same volume name + // then they will reuse the same volume and will be able to access to the same files. 
+ // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // The path in the component container where the volume should be mounted. + // If not path is mentioned, default path is the is `/`. + // +optional + Path string `json:"path,omitempty"` +} + +// EndpointExposure describes the way an endpoint is exposed on the network. +// Only one of the following exposures may be specified: public, internal, none. +// +kubebuilder:validation:Enum=public;internal;none +type EndpointExposurePluginOverrideParentOverride string + +// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. +// Only one of the following protocols may be specified: http, ws, tcp, udp. +// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp +type EndpointProtocolPluginOverrideParentOverride string + +// +union +type K8sLikeComponentLocationPluginOverrideParentOverride struct { + + // +kubebuilder:validation:Enum=Uri;Inlined + // Type of Kubernetes-like location + // + + // +unionDiscriminator + // +optional + LocationType K8sLikeComponentLocationTypePluginOverrideParentOverride `json:"locationType,omitempty"` + + // Location in a file fetched from a uri. + // +optional + Uri string `json:"uri,omitempty"` + + // Inlined manifest + // +optional + Inlined string `json:"inlined,omitempty"` +} + +type BaseCommandPluginOverrideParentOverride struct { + + // +optional + // Defines the group this command is part of + Group *CommandGroupPluginOverrideParentOverride `json:"group,omitempty"` +} + +// K8sLikeComponentLocationType describes the type of +// the location the configuration is fetched from. +// Only one of the following component type may be specified. 
+type K8sLikeComponentLocationTypePluginOverrideParentOverride string + +type CommandGroupPluginOverrideParentOverride struct { + + // +optional + // Kind of group the command is part of + Kind CommandGroupKindPluginOverrideParentOverride `json:"kind,omitempty"` + + // +optional + // Identifies the default command for a given group kind + IsDefault bool `json:"isDefault,omitempty"` +} + +// CommandGroupKind describes the kind of command group. +// +kubebuilder:validation:Enum=build;run;test;debug +type CommandGroupKindPluginOverrideParentOverride string + +func (overrides ParentOverrides) isOverride() {} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go new file mode 100644 index 000000000..00efd4512 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go @@ -0,0 +1,470 @@ +package v1alpha2 + +import ( + attributes "github.com/devfile/api/v2/pkg/attributes" +) + +// +devfile:jsonschema:generate +type PluginOverrides struct { + OverridesBase `json:",inline"` + + // Overrides of components encapsulated in a parent devfile or a plugin. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +devfile:toplevellist + Components []ComponentPluginOverride `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // Overrides of commands encapsulated in a parent devfile or a plugin. + // Overriding is done according to K8S strategic merge patch standard rules. 
+ // +optional + // +patchMergeKey=id + // +patchStrategy=merge + // +devfile:toplevellist + Commands []CommandPluginOverride `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` +} + +//+k8s:openapi-gen=true +type ComponentPluginOverride struct { + + // Mandatory name that allows referencing the component + // from other elements (such as commands) or from an external + // devfile that may reference this component through a parent or a plugin. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + ComponentUnionPluginOverride `json:",inline"` +} + +type CommandPluginOverride struct { + + // Mandatory identifier that allows referencing + // this command in composite commands, from + // a parent, or in events. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Id string `json:"id"` + + // Map of implementation-dependant free-form YAML attributes. 
+ // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + CommandUnionPluginOverride `json:",inline"` +} + +// +union +type ComponentUnionPluginOverride struct { + + // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume + // Type of component + // + // +unionDiscriminator + // +optional + ComponentType ComponentTypePluginOverride `json:"componentType,omitempty"` + + // Allows adding and configuring devworkspace-related containers + // +optional + Container *ContainerComponentPluginOverride `json:"container,omitempty"` + + // Allows importing into the devworkspace the Kubernetes resources + // defined in a given manifest. For example this allows reusing the Kubernetes + // definitions used to deploy some runtime components in production. + // + // +optional + Kubernetes *KubernetesComponentPluginOverride `json:"kubernetes,omitempty"` + + // Allows importing into the devworkspace the OpenShift resources + // defined in a given manifest. For example this allows reusing the OpenShift + // definitions used to deploy some runtime components in production. 
+ // + // +optional + Openshift *OpenshiftComponentPluginOverride `json:"openshift,omitempty"` + + // Allows specifying the definition of a volume + // shared by several other components + // +optional + Volume *VolumeComponentPluginOverride `json:"volume,omitempty"` +} + +// +union +type CommandUnionPluginOverride struct { + + // +kubebuilder:validation:Enum=Exec;Apply;Composite + // Type of devworkspace command + // +unionDiscriminator + // +optional + CommandType CommandTypePluginOverride `json:"commandType,omitempty"` + + // CLI Command executed in an existing component container + // +optional + Exec *ExecCommandPluginOverride `json:"exec,omitempty"` + + // Command that consists in applying a given component definition, + // typically bound to a devworkspace event. + // + // For example, when an `apply` command is bound to a `preStart` event, + // and references a `container` component, it will start the container as a + // K8S initContainer in the devworkspace POD, unless the component has its + // `dedicatedPod` field set to `true`. + // + // When no `apply` command exist for a given component, + // it is assumed the component will be applied at devworkspace start + // by default. + // +optional + Apply *ApplyCommandPluginOverride `json:"apply,omitempty"` + + // Composite command that allows executing several sub-commands + // either sequentially or concurrently + // +optional + Composite *CompositeCommandPluginOverride `json:"composite,omitempty"` +} + +// ComponentType describes the type of component. +// Only one of the following component type may be specified. 
+type ComponentTypePluginOverride string + +// Component that allows the developer to add a configured container into their devworkspace +type ContainerComponentPluginOverride struct { + BaseComponentPluginOverride `json:",inline"` + ContainerPluginOverride `json:",inline"` + Endpoints []EndpointPluginOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Component that allows partly importing Kubernetes resources into the devworkspace POD +type KubernetesComponentPluginOverride struct { + K8sLikeComponentPluginOverride `json:",inline"` +} + +// Component that allows partly importing Openshift resources into the devworkspace POD +type OpenshiftComponentPluginOverride struct { + K8sLikeComponentPluginOverride `json:",inline"` +} + +// Component that allows the developer to declare and configure a volume into their devworkspace +type VolumeComponentPluginOverride struct { + BaseComponentPluginOverride `json:",inline"` + VolumePluginOverride `json:",inline"` +} + +// CommandType describes the type of command. +// Only one of the following command type may be specified. +type CommandTypePluginOverride string + +type ExecCommandPluginOverride struct { + LabeledCommandPluginOverride `json:",inline"` + + // +optional + // The actual command-line string + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. 
+ CommandLine string `json:"commandLine,omitempty"` + + // +optional + // Describes component to which given action relates + // + Component string `json:"component,omitempty"` + + // Working directory where the command should be executed + // + // Special variables that can be used: + // + // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. + // + // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. + // +optional + WorkingDir string `json:"workingDir,omitempty"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Optional list of environment variables that have to be set + // before running the command + Env []EnvVarPluginOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // Whether the command is capable to reload itself when source code changes. + // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. + // + // Default value is `false` + HotReloadCapable bool `json:"hotReloadCapable,omitempty"` +} + +type ApplyCommandPluginOverride struct { + LabeledCommandPluginOverride `json:",inline"` + + // +optional + // Describes component that will be applied + // + Component string `json:"component,omitempty"` +} + +type CompositeCommandPluginOverride struct { + LabeledCommandPluginOverride `json:",inline"` + + // The commands that comprise this composite command + Commands []string `json:"commands,omitempty" patchStrategy:"replace"` + + // Indicates if the sub-commands should be executed concurrently + // +optional + Parallel bool `json:"parallel,omitempty"` +} + +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. 
+type BaseComponentPluginOverride struct { +} + +type ContainerPluginOverride struct { + // +optional + Image string `json:"image,omitempty"` + + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // Environment variables used in this container. + // + // The following variables are reserved and cannot be overridden via env: + // + // - `$PROJECTS_ROOT` + // + // - `$PROJECT_SOURCE` + Env []EnvVarPluginOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + // List of volumes mounts that should be mounted is this container. + VolumeMounts []VolumeMountPluginOverride `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // +optional + MemoryLimit string `json:"memoryLimit,omitempty"` + + // +optional + MemoryRequest string `json:"memoryRequest,omitempty"` + + // +optional + CpuLimit string `json:"cpuLimit,omitempty"` + + // +optional + CpuRequest string `json:"cpuRequest,omitempty"` + + // The command to run in the dockerimage component instead of the default one provided in the image. + // + // Defaults to an empty array, meaning use whatever is defined in the image. + // +optional + Command []string `json:"command,omitempty" patchStrategy:"replace"` + + // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. + // + // Defaults to an empty array, meaning use whatever is defined in the image. + // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Toggles whether or not the project source code should + // be mounted in the component. + // + // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. 
+ // +optional + MountSources *bool `json:"mountSources,omitempty"` + + // Optional specification of the path in the container where + // project sources should be transferred/mounted when `mountSources` is `true`. + // When omitted, the default value of /projects is used. + // +optional + SourceMapping string `json:"sourceMapping,omitempty"` + + // Specify if a container should run in its own separated pod, + // instead of running as part of the main development environment pod. + // + // Default value is `false` + // +optional + DedicatedPod bool `json:"dedicatedPod,omitempty"` +} + +type EndpointPluginOverride struct { + + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // +optional + TargetPort int `json:"targetPort,omitempty"` + + // Describes how the endpoint should be exposed on the network. + // + // - `public` means that the endpoint will be exposed on the public network, typically through + // a K8S ingress or an OpenShift route. + // + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, + // typically by K8S services, to be consumed by other elements running + // on the same cloud internal network. + // + // - `none` means that the endpoint will not be exposed and will only be accessible + // inside the main devworkspace POD, on a local address. + // + // Default value is `public` + // +optional + Exposure EndpointExposurePluginOverride `json:"exposure,omitempty"` + + // Describes the application and transport protocols of the traffic that will go through this endpoint. + // + // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. + // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. + // + // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. + // + // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. 
+ // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. + // + // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. + // + // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. + // + // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. + // + // Default value is `http` + // +optional + Protocol EndpointProtocolPluginOverride `json:"protocol,omitempty"` + + // Describes whether the endpoint should be secured and protected by some + // authentication process. This requires a protocol of `https` or `wss`. + // +optional + Secure bool `json:"secure,omitempty"` + + // Path of the endpoint URL + // +optional + Path string `json:"path,omitempty"` + + // Map of implementation-dependant string-based free-form attributes. + // + // Examples of Che-specific attributes: + // + // - cookiesAuthEnabled: "true" / "false", + // + // - type: "terminal" / "ide" / "ide-dev", + // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` +} + +type K8sLikeComponentPluginOverride struct { + BaseComponentPluginOverride `json:",inline"` + K8sLikeComponentLocationPluginOverride `json:",inline"` + Endpoints []EndpointPluginOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Volume that should be mounted to a component container +type VolumePluginOverride struct { + + // +optional + // Size of the volume + Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. 
Defaults
+	// to false
+	Ephemeral bool `json:"ephemeral,omitempty"`
+}
+
+type LabeledCommandPluginOverride struct {
+	BaseCommandPluginOverride `json:",inline"`
+
+	// +optional
+	// Optional label that provides a label for this command
+	// to be used in Editor UI menus for example
+	Label string `json:"label,omitempty"`
+}
+
+type EnvVarPluginOverride struct {
+	Name string `json:"name" yaml:"name"`
+	// +optional
+	Value string `json:"value,omitempty" yaml:"value"`
+}
+
+// Volume that should be mounted to a component container
+type VolumeMountPluginOverride struct {
+
+	// The volume mount name is the name of an existing `Volume` component.
+	// If several containers mount the same volume name
+	// then they will reuse the same volume and will be able to access to the same files.
+	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+	// +kubebuilder:validation:MaxLength=63
+	Name string `json:"name"`
+
+	// The path in the component container where the volume should be mounted.
+	// If no path is mentioned, the default path is `/`.
+	// +optional
+	Path string `json:"path,omitempty"`
+}
+
+// EndpointExposure describes the way an endpoint is exposed on the network.
+// Only one of the following exposures may be specified: public, internal, none.
+// +kubebuilder:validation:Enum=public;internal;none
+type EndpointExposurePluginOverride string
+
+// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint.
+// Only one of the following protocols may be specified: http, ws, tcp, udp.
+// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp +type EndpointProtocolPluginOverride string + +// +union +type K8sLikeComponentLocationPluginOverride struct { + + // +kubebuilder:validation:Enum=Uri;Inlined + // Type of Kubernetes-like location + // + + // +unionDiscriminator + // +optional + LocationType K8sLikeComponentLocationTypePluginOverride `json:"locationType,omitempty"` + + // Location in a file fetched from a uri. + // +optional + Uri string `json:"uri,omitempty"` + + // Inlined manifest + // +optional + Inlined string `json:"inlined,omitempty"` +} + +type BaseCommandPluginOverride struct { + + // +optional + // Defines the group this command is part of + Group *CommandGroupPluginOverride `json:"group,omitempty"` +} + +// K8sLikeComponentLocationType describes the type of +// the location the configuration is fetched from. +// Only one of the following component type may be specified. +type K8sLikeComponentLocationTypePluginOverride string + +type CommandGroupPluginOverride struct { + + // +optional + // Kind of group the command is part of + Kind CommandGroupKindPluginOverride `json:"kind,omitempty"` + + // +optional + // Identifies the default command for a given group kind + IsDefault bool `json:"isDefault,omitempty"` +} + +// CommandGroupKind describes the kind of command group. 
+// +kubebuilder:validation:Enum=build;run;test;debug +type CommandGroupKindPluginOverride string + +func (overrides PluginOverrides) isOverride() {} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.toplevellistcontainer_definitions.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.toplevellistcontainer_definitions.go new file mode 100644 index 000000000..45be709ff --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.toplevellistcontainer_definitions.go @@ -0,0 +1,33 @@ +package v1alpha2 + +func (container DevWorkspaceTemplateSpecContent) GetToplevelLists() TopLevelLists { + return TopLevelLists{ + "Components": extractKeys(container.Components), + "Projects": extractKeys(container.Projects), + "StarterProjects": extractKeys(container.StarterProjects), + "Commands": extractKeys(container.Commands), + } +} + +func (container ParentOverrides) GetToplevelLists() TopLevelLists { + return TopLevelLists{ + "Components": extractKeys(container.Components), + "Projects": extractKeys(container.Projects), + "StarterProjects": extractKeys(container.StarterProjects), + "Commands": extractKeys(container.Commands), + } +} + +func (container PluginOverridesParentOverride) GetToplevelLists() TopLevelLists { + return TopLevelLists{ + "Components": extractKeys(container.Components), + "Commands": extractKeys(container.Commands), + } +} + +func (container PluginOverrides) GetToplevelLists() TopLevelLists { + return TopLevelLists{ + "Components": extractKeys(container.Components), + "Commands": extractKeys(container.Commands), + } +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go new file mode 100644 index 000000000..50116cb1b --- /dev/null +++ 
b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go @@ -0,0 +1,360 @@ +package v1alpha2 + +import ( + "reflect" +) + +var commandUnion reflect.Type = reflect.TypeOf(CommandUnionVisitor{}) + +func (union CommandUnion) Visit(visitor CommandUnionVisitor) error { + return visitUnion(union, visitor) +} +func (union *CommandUnion) discriminator() *string { + return (*string)(&union.CommandType) +} +func (union *CommandUnion) Normalize() error { + return normalizeUnion(union, commandUnion) +} +func (union *CommandUnion) Simplify() { + simplifyUnion(union, commandUnion) +} + +// +k8s:deepcopy-gen=false +type CommandUnionVisitor struct { + Exec func(*ExecCommand) error + Apply func(*ApplyCommand) error + Composite func(*CompositeCommand) error + Custom func(*CustomCommand) error +} + +var k8sLikeComponentLocation reflect.Type = reflect.TypeOf(K8sLikeComponentLocationVisitor{}) + +func (union K8sLikeComponentLocation) Visit(visitor K8sLikeComponentLocationVisitor) error { + return visitUnion(union, visitor) +} +func (union *K8sLikeComponentLocation) discriminator() *string { + return (*string)(&union.LocationType) +} +func (union *K8sLikeComponentLocation) Normalize() error { + return normalizeUnion(union, k8sLikeComponentLocation) +} +func (union *K8sLikeComponentLocation) Simplify() { + simplifyUnion(union, k8sLikeComponentLocation) +} + +// +k8s:deepcopy-gen=false +type K8sLikeComponentLocationVisitor struct { + Uri func(string) error + Inlined func(string) error +} + +var componentUnion reflect.Type = reflect.TypeOf(ComponentUnionVisitor{}) + +func (union ComponentUnion) Visit(visitor ComponentUnionVisitor) error { + return visitUnion(union, visitor) +} +func (union *ComponentUnion) discriminator() *string { + return (*string)(&union.ComponentType) +} +func (union *ComponentUnion) Normalize() error { + return normalizeUnion(union, componentUnion) +} +func (union *ComponentUnion) Simplify() { + simplifyUnion(union, 
componentUnion) +} + +// +k8s:deepcopy-gen=false +type ComponentUnionVisitor struct { + Container func(*ContainerComponent) error + Kubernetes func(*KubernetesComponent) error + Openshift func(*OpenshiftComponent) error + Volume func(*VolumeComponent) error + Plugin func(*PluginComponent) error + Custom func(*CustomComponent) error +} + +var importReferenceUnion reflect.Type = reflect.TypeOf(ImportReferenceUnionVisitor{}) + +func (union ImportReferenceUnion) Visit(visitor ImportReferenceUnionVisitor) error { + return visitUnion(union, visitor) +} +func (union *ImportReferenceUnion) discriminator() *string { + return (*string)(&union.ImportReferenceType) +} +func (union *ImportReferenceUnion) Normalize() error { + return normalizeUnion(union, importReferenceUnion) +} +func (union *ImportReferenceUnion) Simplify() { + simplifyUnion(union, importReferenceUnion) +} + +// +k8s:deepcopy-gen=false +type ImportReferenceUnionVisitor struct { + Uri func(string) error + Id func(string) error + Kubernetes func(*KubernetesCustomResourceImportReference) error +} + +var projectSource reflect.Type = reflect.TypeOf(ProjectSourceVisitor{}) + +func (union ProjectSource) Visit(visitor ProjectSourceVisitor) error { + return visitUnion(union, visitor) +} +func (union *ProjectSource) discriminator() *string { + return (*string)(&union.SourceType) +} +func (union *ProjectSource) Normalize() error { + return normalizeUnion(union, projectSource) +} +func (union *ProjectSource) Simplify() { + simplifyUnion(union, projectSource) +} + +// +k8s:deepcopy-gen=false +type ProjectSourceVisitor struct { + Git func(*GitProjectSource) error + Zip func(*ZipProjectSource) error + Custom func(*CustomProjectSource) error +} + +var componentUnionParentOverride reflect.Type = reflect.TypeOf(ComponentUnionParentOverrideVisitor{}) + +func (union ComponentUnionParentOverride) Visit(visitor ComponentUnionParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union 
*ComponentUnionParentOverride) discriminator() *string { + return (*string)(&union.ComponentType) +} +func (union *ComponentUnionParentOverride) Normalize() error { + return normalizeUnion(union, componentUnionParentOverride) +} +func (union *ComponentUnionParentOverride) Simplify() { + simplifyUnion(union, componentUnionParentOverride) +} + +// +k8s:deepcopy-gen=false +type ComponentUnionParentOverrideVisitor struct { + Container func(*ContainerComponentParentOverride) error + Kubernetes func(*KubernetesComponentParentOverride) error + Openshift func(*OpenshiftComponentParentOverride) error + Volume func(*VolumeComponentParentOverride) error + Plugin func(*PluginComponentParentOverride) error +} + +var projectSourceParentOverride reflect.Type = reflect.TypeOf(ProjectSourceParentOverrideVisitor{}) + +func (union ProjectSourceParentOverride) Visit(visitor ProjectSourceParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *ProjectSourceParentOverride) discriminator() *string { + return (*string)(&union.SourceType) +} +func (union *ProjectSourceParentOverride) Normalize() error { + return normalizeUnion(union, projectSourceParentOverride) +} +func (union *ProjectSourceParentOverride) Simplify() { + simplifyUnion(union, projectSourceParentOverride) +} + +// +k8s:deepcopy-gen=false +type ProjectSourceParentOverrideVisitor struct { + Git func(*GitProjectSourceParentOverride) error + Zip func(*ZipProjectSourceParentOverride) error +} + +var commandUnionParentOverride reflect.Type = reflect.TypeOf(CommandUnionParentOverrideVisitor{}) + +func (union CommandUnionParentOverride) Visit(visitor CommandUnionParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *CommandUnionParentOverride) discriminator() *string { + return (*string)(&union.CommandType) +} +func (union *CommandUnionParentOverride) Normalize() error { + return normalizeUnion(union, commandUnionParentOverride) +} +func (union *CommandUnionParentOverride) 
Simplify() { + simplifyUnion(union, commandUnionParentOverride) +} + +// +k8s:deepcopy-gen=false +type CommandUnionParentOverrideVisitor struct { + Exec func(*ExecCommandParentOverride) error + Apply func(*ApplyCommandParentOverride) error + Composite func(*CompositeCommandParentOverride) error +} + +var k8sLikeComponentLocationParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationParentOverrideVisitor{}) + +func (union K8sLikeComponentLocationParentOverride) Visit(visitor K8sLikeComponentLocationParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *K8sLikeComponentLocationParentOverride) discriminator() *string { + return (*string)(&union.LocationType) +} +func (union *K8sLikeComponentLocationParentOverride) Normalize() error { + return normalizeUnion(union, k8sLikeComponentLocationParentOverride) +} +func (union *K8sLikeComponentLocationParentOverride) Simplify() { + simplifyUnion(union, k8sLikeComponentLocationParentOverride) +} + +// +k8s:deepcopy-gen=false +type K8sLikeComponentLocationParentOverrideVisitor struct { + Uri func(string) error + Inlined func(string) error +} + +var importReferenceUnionParentOverride reflect.Type = reflect.TypeOf(ImportReferenceUnionParentOverrideVisitor{}) + +func (union ImportReferenceUnionParentOverride) Visit(visitor ImportReferenceUnionParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *ImportReferenceUnionParentOverride) discriminator() *string { + return (*string)(&union.ImportReferenceType) +} +func (union *ImportReferenceUnionParentOverride) Normalize() error { + return normalizeUnion(union, importReferenceUnionParentOverride) +} +func (union *ImportReferenceUnionParentOverride) Simplify() { + simplifyUnion(union, importReferenceUnionParentOverride) +} + +// +k8s:deepcopy-gen=false +type ImportReferenceUnionParentOverrideVisitor struct { + Uri func(string) error + Id func(string) error + Kubernetes 
func(*KubernetesCustomResourceImportReferenceParentOverride) error +} + +var componentUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(ComponentUnionPluginOverrideParentOverrideVisitor{}) + +func (union ComponentUnionPluginOverrideParentOverride) Visit(visitor ComponentUnionPluginOverrideParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *ComponentUnionPluginOverrideParentOverride) discriminator() *string { + return (*string)(&union.ComponentType) +} +func (union *ComponentUnionPluginOverrideParentOverride) Normalize() error { + return normalizeUnion(union, componentUnionPluginOverrideParentOverride) +} +func (union *ComponentUnionPluginOverrideParentOverride) Simplify() { + simplifyUnion(union, componentUnionPluginOverrideParentOverride) +} + +// +k8s:deepcopy-gen=false +type ComponentUnionPluginOverrideParentOverrideVisitor struct { + Container func(*ContainerComponentPluginOverrideParentOverride) error + Kubernetes func(*KubernetesComponentPluginOverrideParentOverride) error + Openshift func(*OpenshiftComponentPluginOverrideParentOverride) error + Volume func(*VolumeComponentPluginOverrideParentOverride) error +} + +var commandUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideParentOverrideVisitor{}) + +func (union CommandUnionPluginOverrideParentOverride) Visit(visitor CommandUnionPluginOverrideParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *CommandUnionPluginOverrideParentOverride) discriminator() *string { + return (*string)(&union.CommandType) +} +func (union *CommandUnionPluginOverrideParentOverride) Normalize() error { + return normalizeUnion(union, commandUnionPluginOverrideParentOverride) +} +func (union *CommandUnionPluginOverrideParentOverride) Simplify() { + simplifyUnion(union, commandUnionPluginOverrideParentOverride) +} + +// +k8s:deepcopy-gen=false +type CommandUnionPluginOverrideParentOverrideVisitor struct { + Exec 
func(*ExecCommandPluginOverrideParentOverride) error + Apply func(*ApplyCommandPluginOverrideParentOverride) error + Composite func(*CompositeCommandPluginOverrideParentOverride) error +} + +var k8sLikeComponentLocationPluginOverrideParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideParentOverrideVisitor{}) + +func (union K8sLikeComponentLocationPluginOverrideParentOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *K8sLikeComponentLocationPluginOverrideParentOverride) discriminator() *string { + return (*string)(&union.LocationType) +} +func (union *K8sLikeComponentLocationPluginOverrideParentOverride) Normalize() error { + return normalizeUnion(union, k8sLikeComponentLocationPluginOverrideParentOverride) +} +func (union *K8sLikeComponentLocationPluginOverrideParentOverride) Simplify() { + simplifyUnion(union, k8sLikeComponentLocationPluginOverrideParentOverride) +} + +// +k8s:deepcopy-gen=false +type K8sLikeComponentLocationPluginOverrideParentOverrideVisitor struct { + Uri func(string) error + Inlined func(string) error +} + +var componentUnionPluginOverride reflect.Type = reflect.TypeOf(ComponentUnionPluginOverrideVisitor{}) + +func (union ComponentUnionPluginOverride) Visit(visitor ComponentUnionPluginOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *ComponentUnionPluginOverride) discriminator() *string { + return (*string)(&union.ComponentType) +} +func (union *ComponentUnionPluginOverride) Normalize() error { + return normalizeUnion(union, componentUnionPluginOverride) +} +func (union *ComponentUnionPluginOverride) Simplify() { + simplifyUnion(union, componentUnionPluginOverride) +} + +// +k8s:deepcopy-gen=false +type ComponentUnionPluginOverrideVisitor struct { + Container func(*ContainerComponentPluginOverride) error + Kubernetes func(*KubernetesComponentPluginOverride) error + Openshift 
func(*OpenshiftComponentPluginOverride) error + Volume func(*VolumeComponentPluginOverride) error +} + +var commandUnionPluginOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideVisitor{}) + +func (union CommandUnionPluginOverride) Visit(visitor CommandUnionPluginOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *CommandUnionPluginOverride) discriminator() *string { + return (*string)(&union.CommandType) +} +func (union *CommandUnionPluginOverride) Normalize() error { + return normalizeUnion(union, commandUnionPluginOverride) +} +func (union *CommandUnionPluginOverride) Simplify() { + simplifyUnion(union, commandUnionPluginOverride) +} + +// +k8s:deepcopy-gen=false +type CommandUnionPluginOverrideVisitor struct { + Exec func(*ExecCommandPluginOverride) error + Apply func(*ApplyCommandPluginOverride) error + Composite func(*CompositeCommandPluginOverride) error +} + +var k8sLikeComponentLocationPluginOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideVisitor{}) + +func (union K8sLikeComponentLocationPluginOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *K8sLikeComponentLocationPluginOverride) discriminator() *string { + return (*string)(&union.LocationType) +} +func (union *K8sLikeComponentLocationPluginOverride) Normalize() error { + return normalizeUnion(union, k8sLikeComponentLocationPluginOverride) +} +func (union *K8sLikeComponentLocationPluginOverride) Simplify() { + simplifyUnion(union, k8sLikeComponentLocationPluginOverride) +} + +// +k8s:deepcopy-gen=false +type K8sLikeComponentLocationPluginOverrideVisitor struct { + Uri func(string) error + Inlined func(string) error +} diff --git a/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go b/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go new file mode 100644 index 000000000..43c77c4e6 --- /dev/null +++ 
b/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go @@ -0,0 +1,453 @@ +package attributes + +import ( + "encoding/json" + "strconv" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// Attributes provides a way to add a map of arbitrary YAML/JSON +// objects. +// +kubebuilder:validation:Type=object +// +kubebuilder:validation:XPreserveUnknownFields +type Attributes map[string]apiext.JSON + +// MarshalJSON implements custom JSON marshaling +// to support free-form attributes +func (attributes Attributes) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]apiext.JSON(attributes)) +} + +// UnmarshalJSON implements custom JSON unmarshalling +// to support free-form attributes +func (attributes *Attributes) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, (*map[string]apiext.JSON)(attributes)) +} + +// Exists returns `true` if the attribute with the given key +// exists in the attributes map. +func (attributes Attributes) Exists(key string) bool { + _, exists := attributes[key] + return exists +} + +type convertPrimitiveFunc func(attributes Attributes, key string, attributeType string) (interface{}, error) + +func (attributes Attributes) getPrimitive(key string, zeroValue interface{}, resultType string, convert convertPrimitiveFunc, errorHolder *error) interface{} { + var err error + if attribute, exists := attributes[key]; exists { + var result interface{} + switch resultType { + case "string": + primitiveResult := new(string) + err = json.Unmarshal(attribute.Raw, primitiveResult) + result = *primitiveResult + case "boolean": + primitiveResult := new(bool) + err = json.Unmarshal(attribute.Raw, primitiveResult) + result = *primitiveResult + case "number": + primitiveResult := new(float64) + err = json.Unmarshal(attribute.Raw, primitiveResult) + result = *primitiveResult + } + if err == nil { + return result + } + + switch typeError := err.(type) { + case *json.UnmarshalTypeError: + convertedValue, 
retryError := convert(attributes, key, typeError.Value) + if retryError == nil && convertedValue != nil { + return convertedValue + } + } + } else { + err = &KeyNotFoundError{Key: key} + } + if errorHolder != nil { + *errorHolder = err + } + return zeroValue +} + +// GetString allows returning the attribute with the given key +// as a string. If the attribute JSON/YAML content is +// not a JSON string (or a primitive type that can be converted into a string), +// then the result will be the empty string and an error will be raised. +// +// An optional error holder can be passed as an argument +// to receive any error that might have be raised during the attribute +// decoding +func (attributes Attributes) GetString(key string, errorHolder *error) string { + return attributes.getPrimitive( + key, + "", + "string", + func(attributes Attributes, key string, attributeType string) (interface{}, error) { + var convertedValue interface{} + var retryError error + switch attributeType { + case "bool": + convertedValue = strconv.FormatBool(attributes.GetBoolean(key, &retryError)) + case "number": + convertedValue = strconv.FormatFloat(attributes.GetNumber(key, &retryError), 'g', -1, 64) + } + return convertedValue, retryError + }, + errorHolder).(string) +} + +// GetNumber allows returning the attribute with the given key +// as a float64. If the attribute JSON/YAML content is +// not a JSON number (or a JSON string that can be converted into a JSON number), +// then the result will be the zero value and an error is raised. 
+//
+// An optional error holder can be passed as an argument
+// to receive any error that might have been raised during the attribute
+// decoding
+func (attributes Attributes) GetNumber(key string, errorHolder *error) float64 {
+	return attributes.getPrimitive(
+		key,
+		0.0,
+		"number",
+		func(attributes Attributes, key string, attributeType string) (interface{}, error) {
+			var convertedValue interface{}
+			var retryError error
+			switch attributeType {
+			case "string":
+				var convError error
+				convertedValue, convError = strconv.ParseFloat(attributes.GetString(key, &retryError), 64)
+				if retryError == nil {
+					retryError = convError
+				}
+			}
+			return convertedValue, retryError
+		},
+		errorHolder).(float64)
+}
+
+// GetBoolean allows returning the attribute with the given key
+// as a bool. If the attribute JSON/YAML content is
+// not a JSON boolean (or a JSON string that can be converted into a JSON boolean),
+// then the result will be the `false` zero value and an error is raised.
+//
+// String values can be converted to boolean values according to the following rules:
+//
+// - strings "1", "t", "T", "TRUE", "true", and "True" will be converted to a `true` boolean
+//
+// - strings "0", "f", "F", "FALSE", "false", "False" will be converted to a `false` boolean
+//
+// - any other string value will raise an error.
+// +// An optional error holder can be passed as an argument +// to receive any error that might have be raised during the attribute +// decoding +func (attributes Attributes) GetBoolean(key string, errorHolder *error) bool { + return attributes.getPrimitive( + key, + false, + "boolean", + func(attributes Attributes, key string, attributeType string) (interface{}, error) { + var convertedValue interface{} + var retryError error + switch attributeType { + case "string": + var convError error + convertedValue, convError = strconv.ParseBool(attributes.GetString(key, &retryError)) + if retryError == nil { + retryError = convError + } + } + return convertedValue, retryError + }, + errorHolder).(bool) +} + +// Get allows returning the attribute with the given key +// as an interface. The underlying type of the returned interface +// depends on the JSON/YAML content of the attribute. It can be either a simple type +// like a string, a float64 or a bool, either a structured type like +// a map of interfaces or an array of interfaces. +// +// An optional error holder can be passed as an argument +// to receive any error that might have occurred during the attribute +// decoding +func (attributes Attributes) Get(key string, errorHolder *error) interface{} { + if attribute, exists := attributes[key]; exists { + container := &[]interface{}{} + err := json.Unmarshal([]byte("[ "+string(attribute.Raw)+" ]"), container) + if err != nil && errorHolder != nil { + *errorHolder = err + } + if len(*container) > 0 { + return (*container)[0] + } + } else if !exists && errorHolder != nil { + *errorHolder = &KeyNotFoundError{Key: key} + } + return nil +} + +// GetInto allows decoding the attribute with the given key +// into a given interface. The provided interface should be a pointer +// to a struct, to an array, or to any simple type. 
+// +// An error is returned if the provided interface type is not compatible +// with the attribute content +func (attributes Attributes) GetInto(key string, into interface{}) error { + var err error + if attribute, exists := attributes[key]; exists { + err = json.Unmarshal(attribute.Raw, into) + } else { + err = &KeyNotFoundError{Key: key} + } + return err +} + +// Strings allows returning only the attributes whose content +// is a JSON string. +// +// An optional error holder can be passed as an argument +// to receive any error that might have be raised during the attribute +// decoding +func (attributes Attributes) Strings(errorHolder *error) map[string]string { + result := map[string]string{} + for key := range attributes { + // Here only the last error is returned. + // Let's keep it simple and avoid adding a dependency + // on an external package just for gathering errors. + if value, isRightType := attributes.Get(key, errorHolder).(string); isRightType { + result[key] = value + } + } + return result +} + +// Numbers allows returning only the attributes whose content +// is a JSON number. +// +// An optional error holder can be passed as an argument +// to receive any error that might have be raised during the attribute +// decoding +func (attributes Attributes) Numbers(errorHolder *error) map[string]float64 { + result := map[string]float64{} + for key := range attributes { + // Here only the last error is returned. + // Let's keep it simple and avoid adding a dependency + // on an external package just for gathering errors. + if value, isRightType := attributes.Get(key, errorHolder).(float64); isRightType { + result[key] = value + } + } + return result +} + +// Booleans allows returning only the attributes whose content +// is a JSON boolean. 
+// +// An optional error holder can be passed as an argument +// to receive any error that might have be raised during the attribute +// decoding +func (attributes Attributes) Booleans(errorHolder *error) map[string]bool { + result := map[string]bool{} + for key := range attributes { + // Here only the last error is returned. + // Let's keep it simple and avoid adding a dependency + // on an external package just for gathering errors + if value, isRightType := attributes.Get(key, errorHolder).(bool); isRightType { + result[key] = value + } + } + return result +} + +// Into allows decoding the whole attributes map +// into a given interface. The provided interface should be either a pointer +// to a struct, or to a map. +// +// An error is returned if the provided interface type is not compatible +// with the structure of the attributes +func (attributes Attributes) Into(into interface{}) error { + if attributes == nil { + return nil + } + + rawJSON, err := json.Marshal(attributes) + if err != nil { + return err + } + + err = json.Unmarshal(rawJSON, into) + return err +} + +// AsInterface allows returning the whole attributes map... +// as an interface. When the attributes are not empty, +// the returned interface will be a map +// of interfaces. 
+// +// An optional error holder can be passed as an argument +// to receive any error that might have occured during the attributes +// decoding +func (attributes Attributes) AsInterface(errorHolder *error) interface{} { + rawJSON, err := json.Marshal(attributes) + if err != nil && errorHolder != nil { + *errorHolder = err + return nil + } + + container := &[]interface{}{} + err = json.Unmarshal([]byte("[ "+string(rawJSON)+" ]"), container) + if err != nil && errorHolder != nil { + *errorHolder = err + return nil + } + + return (*container)[0] +} + +// PutString allows adding a string attribute to the +// current map of attributes +func (attributes Attributes) PutString(key string, value string) Attributes { + rawJSON, _ := json.Marshal(value) + attributes[key] = apiext.JSON{ + Raw: rawJSON, + } + return attributes +} + +// FromStringMap allows adding into the current map of attributes all +// the attributes contained in the given string map +func (attributes Attributes) FromStringMap(strings map[string]string) Attributes { + for key, value := range strings { + attributes.PutString(key, value) + } + return attributes +} + +// PutFloat allows adding a float attribute to the +// current map of attributes +func (attributes Attributes) PutFloat(key string, value float64) Attributes { + rawJSON, _ := json.Marshal(value) + attributes[key] = apiext.JSON{ + Raw: rawJSON, + } + return attributes +} + +// FromFloatMap allows adding into the current map of attributes all +// the attributes contained in the given map of floats +func (attributes Attributes) FromFloatMap(strings map[string]float64) Attributes { + for key, value := range strings { + attributes.PutFloat(key, value) + } + return attributes +} + +// PutInteger allows adding an integer attribute to the +// current map of attributes +func (attributes Attributes) PutInteger(key string, value int) Attributes { + rawJSON, _ := json.Marshal(value) + attributes[key] = apiext.JSON{ + Raw: rawJSON, + } + return attributes 
+} + +// FromIntegerMap allows adding into the current map of attributes all +// the attributes contained in the given map of integers +func (attributes Attributes) FromIntegerMap(strings map[string]int) Attributes { + for key, value := range strings { + rawJSON, _ := json.Marshal(value) + attributes[key] = apiext.JSON{ + Raw: rawJSON, + } + } + return attributes +} + +// PutBoolean allows adding a boolean attribute to the +// current map of attributes +func (attributes Attributes) PutBoolean(key string, value bool) Attributes { + rawJSON, _ := json.Marshal(value) + attributes[key] = apiext.JSON{ + Raw: rawJSON, + } + return attributes +} + +// FromBooleanMap allows adding into the current map of attributes all +// the attributes contained in the given map of booleans +func (attributes Attributes) FromBooleanMap(strings map[string]bool) Attributes { + for key, value := range strings { + rawJSON, _ := json.Marshal(value) + attributes[key] = apiext.JSON{ + Raw: rawJSON, + } + } + return attributes +} + +// Put allows adding an attribute to the +// current map of attributes. +// The attribute is provided as an interface, and can be any value +// that supports Json Marshaling. +// +// An optional error holder can be passed as an argument +// to receive any error that might have occured during the attributes +// decoding +func (attributes Attributes) Put(key string, value interface{}, errorHolder *error) Attributes { + rawJSON, err := json.Marshal(value) + if err != nil && errorHolder != nil { + *errorHolder = err + } + + attributes[key] = apiext.JSON{ + Raw: rawJSON, + } + return attributes +} + +// FromMap allows adding into the current map of attributes all +// the attributes contained in the given map of interfaces +// each attribute of the given map is provided as an interface, and can be any value +// that supports Json Marshaling. 
+// +// An optional error holder can be passed as an argument +// to receive any error that might have occured during the attributes +// decoding +func (attributes Attributes) FromMap(strings map[string]interface{}, errorHolder *error) Attributes { + for key, value := range strings { + // Here only the last error is returned. + // Let's keep it simple and avoid adding a dependency + // on an external package just for gathering errors. + attributes.Put(key, value, errorHolder) + } + return attributes +} + +// FromInterface allows completing the map of attributes from the given interface. +// The given interface, and can be any value +// that supports Json Marshaling and will be marshalled as a JSON object. +// +// This is especially useful to create attributes from well-known, but +// implementation- dependent Go structures. +// +// An optional error holder can be passed as an argument +// to receive any error that might have occured during the attributes +// decoding +func (attributes Attributes) FromInterface(structure interface{}, errorHolder *error) Attributes { + newAttributes := Attributes{} + completeJSON, err := json.Marshal(structure) + if err != nil && errorHolder != nil { + *errorHolder = err + } + + err = json.Unmarshal(completeJSON, &newAttributes) + for key, value := range newAttributes { + attributes[key] = value + } + return attributes +} diff --git a/vendor/github.com/devfile/api/v2/pkg/attributes/errors.go b/vendor/github.com/devfile/api/v2/pkg/attributes/errors.go new file mode 100644 index 000000000..3c7552416 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/attributes/errors.go @@ -0,0 +1,12 @@ +package attributes + +import "fmt" + +// KeyNotFoundError returns an error if no key is found for the attribute +type KeyNotFoundError struct { + Key string +} + +func (e *KeyNotFoundError) Error() string { + return fmt.Sprintf("Attribute with key %q does not exist", e.Key) +} diff --git a/vendor/github.com/devfile/api/v2/pkg/devfile/header.go 
b/vendor/github.com/devfile/api/v2/pkg/devfile/header.go new file mode 100644 index 000000000..21f8e8c87 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/devfile/header.go @@ -0,0 +1,84 @@ +package devfile + +import ( + attributes "github.com/devfile/api/v2/pkg/attributes" +) + +// DevfileHeader describes the structure of the devfile-specific top-level fields +// that are not part of the K8S API structures +type DevfileHeader struct { + // Devfile schema version + // +kubebuilder:validation:Pattern=^([2-9])\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$ + SchemaVersion string `json:"schemaVersion"` + + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + // Optional metadata + Metadata DevfileMetadata `json:"metadata,omitempty"` +} + +// Architecture describes the architecture type +// +kubebuilder:validation:Enum=amd64;arm64;ppc64le;s390x +type Architecture string + +const ( + AMD64 Architecture = "amd64" + ARM64 Architecture = "arm64" + PPC64LE Architecture = "ppc64le" + S390X Architecture = "s390x" +) + +type DevfileMetadata struct { + // Optional devfile name + // +optional + Name string `json:"name,omitempty"` + + // Optional semver-compatible version + // +optional + // +kubebuilder:validation:Pattern=^([0-9]+)\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$ + Version string `json:"version,omitempty"` + + // Map of implementation-dependant free-form YAML attributes. Deprecated, use the top-level attributes field instead. 
+ // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty"` + + // Optional devfile display name + // +optional + DisplayName string `json:"displayName,omitempty"` + + // Optional devfile description + // +optional + Description string `json:"description,omitempty"` + + // Optional devfile tags + // +optional + Tags []string `json:"tags,omitempty"` + + // Optional list of processor architectures that the devfile supports, empty list suggests that the devfile can be used on any architecture + // +optional + // +kubebuilder:validation:UniqueItems=true + Architectures []Architecture `json:"architectures,omitempty"` + + // Optional devfile icon, can be a URI or a relative path in the project + // +optional + Icon string `json:"icon,omitempty"` + + // Optional devfile global memory limit + // +optional + GlobalMemoryLimit string `json:"globalMemoryLimit,omitempty"` + + // Optional devfile project type + // +optional + ProjectType string `json:"projectType,omitempty"` + + // Optional devfile language + // +optional + Language string `json:"language,omitempty"` + + // Optional devfile website + // +optional + Website string `json:"website,omitempty"` +} diff --git a/vendor/github.com/devfile/devworkspace-operator/LICENSE b/vendor/github.com/devfile/devworkspace-operator/LICENSE new file mode 100644 index 000000000..e55f34467 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/LICENSE @@ -0,0 +1,277 @@ +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. 
DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. + +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. 
+ +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. 
+ Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. 
REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. 
+ +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. 
If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. 
The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. 
\ No newline at end of file diff --git a/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/common.go b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/common.go new file mode 100644 index 000000000..4977dcf03 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/common.go @@ -0,0 +1,59 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package v1alpha1 + +import v1 "k8s.io/api/core/v1" + +// Summary of additions that are to be merged into the main devworkspace deployment +type PodAdditions struct { + // Annotations to be applied to devworkspace deployment + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Annotations map[string]string `json:"annotations,omitempty"` + // Labels to be applied to devworkspace deployment + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Labels map[string]string `json:"labels,omitempty"` + // Containers to add to devworkspace deployment + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Containers []v1.Container `json:"containers,omitempty"` + // Init containers to add to devworkspace deployment + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + InitContainers []v1.Container `json:"initContainers,omitempty"` + // Volumes to add to devworkspace deployment + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Volumes []v1.Volume `json:"volumes,omitempty"` + // VolumeMounts to add to all containers in a devworkspace deployment + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` + // 
ImagePullSecrets to add to devworkspace deployment + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + PullSecrets []v1.LocalObjectReference `json:"pullSecrets,omitempty"` + // Annotations for the devworkspace service account, it might be used for e.g. OpenShift oauth with SA as auth client + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"` +} diff --git a/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devfile.go b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devfile.go new file mode 100644 index 000000000..0724e9200 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devfile.go @@ -0,0 +1,31 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package v1alpha1 + +type EndpointAttribute string +type EndpointType string + +const ( + // TypeEndpointAttribute is an attribute used for devfile endpoints that specifies the endpoint type. + // See EndpointType for respected values + TypeEndpointAttribute EndpointAttribute = "type" + + // The value for `type` endpoint attribute that indicates that it should be exposed as mainUrl + // in the workspace status + MainEndpointType EndpointType = "main" + + // DiscoverableAttribute defines an endpoint as "discoverable", meaning that a service should be + // created using the endpoint name (i.e. 
instead of generating a service name for all endpoints, + // this endpoint should be statically accessible) + DiscoverableAttribute EndpointAttribute = "discoverable" +) diff --git a/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devworkspacerouting_types.go b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devworkspacerouting_types.go new file mode 100644 index 000000000..b88057e84 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/devworkspacerouting_types.go @@ -0,0 +1,107 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package v1alpha1 + +import ( + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + devfileAttr "github.com/devfile/api/v2/pkg/attributes" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DevWorkspaceRoutingSpec defines the desired state of DevWorkspaceRouting +// +k8s:openapi-gen=true +type DevWorkspaceRoutingSpec struct { + // Id for the DevWorkspace being routed + DevWorkspaceId string `json:"devworkspaceId"` + // Class of the routing: this drives which DevWorkspaceRouting controller will manage this routing + RoutingClass DevWorkspaceRoutingClass `json:"routingClass,omitempty"` + // Machines to endpoints map + Endpoints map[string]EndpointList `json:"endpoints"` + // Selector that should be used by created services to point to the devworkspace Pod + PodSelector map[string]string `json:"podSelector"` +} + +type DevWorkspaceRoutingClass string + +const ( + DevWorkspaceRoutingBasic DevWorkspaceRoutingClass = "basic" + DevWorkspaceRoutingCluster DevWorkspaceRoutingClass = "cluster" + DevWorkspaceRoutingClusterTLS 
DevWorkspaceRoutingClass = "cluster-tls" + DevWorkspaceRoutingWebTerminal DevWorkspaceRoutingClass = "web-terminal" +) + +// DevWorkspaceRoutingStatus defines the observed state of DevWorkspaceRouting +// +k8s:openapi-gen=true +type DevWorkspaceRoutingStatus struct { + // Additions to main devworkspace deployment + PodAdditions *PodAdditions `json:"podAdditions,omitempty"` + // Machine name to exposed endpoint map + ExposedEndpoints map[string]ExposedEndpointList `json:"exposedEndpoints,omitempty"` + // Routing reconcile phase + Phase DevWorkspaceRoutingPhase `json:"phase,omitempty"` + // Message is a user-readable message explaining the current phase (e.g. reason for failure) + Message string `json:"message,omitempty"` +} + +// Valid phases for devworkspacerouting +type DevWorkspaceRoutingPhase string + +const ( + RoutingReady DevWorkspaceRoutingPhase = "Ready" + RoutingPreparing DevWorkspaceRoutingPhase = "Preparing" + RoutingFailed DevWorkspaceRoutingPhase = "Failed" +) + +type ExposedEndpoint struct { + // Name of the exposed endpoint + Name string `json:"name"` + // Public URL of the exposed endpoint + Url string `json:"url"` + // Attributes of the exposed endpoint + // +optional + Attributes devfileAttr.Attributes `json:"attributes,omitempty"` +} + +type EndpointList []dw.Endpoint + +type ExposedEndpointList []ExposedEndpoint + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DevWorkspaceRouting is the Schema for the devworkspaceroutings API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=devworkspaceroutings,scope=Namespaced,shortName=dwr +// +kubebuilder:printcolumn:name="DevWorkspace ID",type="string",JSONPath=".spec.devworkspaceId",description="The owner DevWorkspace's unique id" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The current phase" +// 
+kubebuilder:printcolumn:name="Info",type="string",JSONPath=".status.message",description="Additional info about DevWorkspaceRouting state" +type DevWorkspaceRouting struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DevWorkspaceRoutingSpec `json:"spec,omitempty"` + Status DevWorkspaceRoutingStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DevWorkspaceRoutingList contains a list of DevWorkspaceRouting +type DevWorkspaceRoutingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DevWorkspaceRouting `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DevWorkspaceRouting{}, &DevWorkspaceRoutingList{}) +} diff --git a/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/doc.go b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/doc.go new file mode 100644 index 000000000..933762fb3 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/doc.go @@ -0,0 +1,16 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +// Package v1alpha1 contains API Schema definitions for the controller v1alpha1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=controller.devfile.io +package v1alpha1 diff --git a/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/groupversion_info.go b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..59601646b --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/groupversion_info.go @@ -0,0 +1,32 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +// Package v1alpha1 contains API Schema definitions for the controller v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=controller.devfile.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "controller.devfile.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..666ca8660 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,292 @@ +// +build !ignore_autogenerated + +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/api/v2/pkg/attributes" + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevWorkspaceRouting) DeepCopyInto(out *DevWorkspaceRouting) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRouting. +func (in *DevWorkspaceRouting) DeepCopy() *DevWorkspaceRouting { + if in == nil { + return nil + } + out := new(DevWorkspaceRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DevWorkspaceRouting) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevWorkspaceRoutingList) DeepCopyInto(out *DevWorkspaceRoutingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DevWorkspaceRouting, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRoutingList. +func (in *DevWorkspaceRoutingList) DeepCopy() *DevWorkspaceRoutingList { + if in == nil { + return nil + } + out := new(DevWorkspaceRoutingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DevWorkspaceRoutingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DevWorkspaceRoutingSpec) DeepCopyInto(out *DevWorkspaceRoutingSpec) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make(map[string]EndpointList, len(*in)) + for key, val := range *in { + var outVal []v1alpha2.Endpoint + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(EndpointList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRoutingSpec. +func (in *DevWorkspaceRoutingSpec) DeepCopy() *DevWorkspaceRoutingSpec { + if in == nil { + return nil + } + out := new(DevWorkspaceRoutingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevWorkspaceRoutingStatus) DeepCopyInto(out *DevWorkspaceRoutingStatus) { + *out = *in + if in.PodAdditions != nil { + in, out := &in.PodAdditions, &out.PodAdditions + *out = new(PodAdditions) + (*in).DeepCopyInto(*out) + } + if in.ExposedEndpoints != nil { + in, out := &in.ExposedEndpoints, &out.ExposedEndpoints + *out = make(map[string]ExposedEndpointList, len(*in)) + for key, val := range *in { + var outVal []ExposedEndpoint + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExposedEndpointList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceRoutingStatus. 
+func (in *DevWorkspaceRoutingStatus) DeepCopy() *DevWorkspaceRoutingStatus { + if in == nil { + return nil + } + out := new(DevWorkspaceRoutingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in EndpointList) DeepCopyInto(out *EndpointList) { + { + in := &in + *out = make(EndpointList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointList. +func (in EndpointList) DeepCopy() EndpointList { + if in == nil { + return nil + } + out := new(EndpointList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExposedEndpoint) DeepCopyInto(out *ExposedEndpoint) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposedEndpoint. +func (in *ExposedEndpoint) DeepCopy() *ExposedEndpoint { + if in == nil { + return nil + } + out := new(ExposedEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExposedEndpointList) DeepCopyInto(out *ExposedEndpointList) { + { + in := &in + *out = make(ExposedEndpointList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExposedEndpointList. 
+func (in ExposedEndpointList) DeepCopy() ExposedEndpointList { + if in == nil { + return nil + } + out := new(ExposedEndpointList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAdditions) DeepCopyInto(out *PodAdditions) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PullSecrets != nil { + in, out := &in.PullSecrets, &out.PullSecrets + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.ServiceAccountAnnotations != nil { + in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAdditions. 
+func (in *PodAdditions) DeepCopy() *PodAdditions { + if in == nil { + return nil + } + out := new(PodAdditions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/devworkspacerouting_controller.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/devworkspacerouting_controller.go new file mode 100644 index 000000000..a03686723 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/devworkspacerouting_controller.go @@ -0,0 +1,329 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package devworkspacerouting + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers" + maputils "github.com/devfile/devworkspace-operator/internal/map" + "github.com/devfile/devworkspace-operator/pkg/config" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + + "github.com/go-logr/logr" + "github.com/google/go-cmp/cmp" + routeV1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + controllerv1alpha1 
"github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" +) + +var ( + NoSolversEnabled = errors.New("reconciler does not define SolverGetter") +) + +const devWorkspaceRoutingFinalizer = "devworkspacerouting.controller.devfile.io" + +// DevWorkspaceRoutingReconciler reconciles a DevWorkspaceRouting object +type DevWorkspaceRoutingReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + // SolverGetter will be used to get solvers for a particular devWorkspaceRouting + SolverGetter solvers.RoutingSolverGetter +} + +// +kubebuilder:rbac:groups=controller.devfile.io,resources=devworkspaceroutings,verbs=* +// +kubebuilder:rbac:groups=controller.devfile.io,resources=devworkspaceroutings/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="",resources=services,verbs=* +// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=* +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=* +// +kubebuidler:rbac:groups=route.openshift.io,resources=routes/status,verbs=get,list,watch +// +kubebuilder:rbac:groups=route.openshift.io,resources=routes/custom-host,verbs=create + +func (r *DevWorkspaceRoutingReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + + reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) + + // Fetch the DevWorkspaceRouting instance + instance := &controllerv1alpha1.DevWorkspaceRouting{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8sErrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return reconcile.Result{}, err + } + reqLogger = reqLogger.WithValues(constants.DevWorkspaceIDLoggerKey, instance.Spec.DevWorkspaceId) + reqLogger.Info("Reconciling DevWorkspaceRouting") + + if instance.Spec.RoutingClass == "" { + return reconcile.Result{}, r.markRoutingFailed(instance, "DevWorkspaceRouting requires field routingClass to be set") + } + + solver, err := r.SolverGetter.GetSolver(r.Client, instance.Spec.RoutingClass) + if err != nil { + if errors.Is(err, solvers.RoutingNotSupported) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, r.markRoutingFailed(instance, fmt.Sprintf("Invalid routingClass for DevWorkspace: %s", err)) + } + + // Check if the DevWorkspaceRouting instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. + if instance.GetDeletionTimestamp() != nil { + reqLogger.Info("Finalizing DevWorkspaceRouting") + return reconcile.Result{}, r.finalize(solver, instance) + } + + if instance.Status.Phase == controllerv1alpha1.RoutingFailed { + return reconcile.Result{}, nil + } + + // Add finalizer for this CR if not already present + if err := r.setFinalizer(reqLogger, solver, instance); err != nil { + return reconcile.Result{}, err + } + + workspaceMeta := solvers.DevWorkspaceMetadata{ + DevWorkspaceId: instance.Spec.DevWorkspaceId, + Namespace: instance.Namespace, + PodSelector: instance.Spec.PodSelector, + } + + restrictedAccess, setRestrictedAccess := instance.Annotations[constants.DevWorkspaceRestrictedAccessAnnotation] + routingObjects, err := solver.GetSpecObjects(instance, workspaceMeta) + if err != nil { + var notReady *solvers.RoutingNotReady + if errors.As(err, ¬Ready) { + duration := notReady.Retry + if duration.Milliseconds() == 0 { + duration = 1 * time.Second + } + reqLogger.Info("controller not ready for devworkspace routing. 
Retrying", "DelayMs", duration.Milliseconds()) + return reconcile.Result{RequeueAfter: duration}, r.reconcileStatus(instance, nil, nil, false, "Waiting for DevWorkspaceRouting controller to be ready") + } + + var invalid *solvers.RoutingInvalid + if errors.As(err, &invalid) { + reqLogger.Error(invalid, "routing controller considers routing invalid") + return reconcile.Result{}, r.markRoutingFailed(instance, fmt.Sprintf("Unable to provision networking for DevWorkspace: %s", invalid)) + } + + // generic error, just fail the reconciliation + return reconcile.Result{}, err + } + + services := routingObjects.Services + for idx := range services { + err := controllerutil.SetControllerReference(instance, &services[idx], r.Scheme) + if err != nil { + return reconcile.Result{}, err + } + if setRestrictedAccess { + services[idx].Annotations = maputils.Append(services[idx].Annotations, constants.DevWorkspaceRestrictedAccessAnnotation, restrictedAccess) + } + } + ingresses := routingObjects.Ingresses + for idx := range ingresses { + err := controllerutil.SetControllerReference(instance, &ingresses[idx], r.Scheme) + if err != nil { + return reconcile.Result{}, err + } + if setRestrictedAccess { + ingresses[idx].Annotations = maputils.Append(ingresses[idx].Annotations, constants.DevWorkspaceRestrictedAccessAnnotation, restrictedAccess) + } + } + routes := routingObjects.Routes + for idx := range routes { + err := controllerutil.SetControllerReference(instance, &routes[idx], r.Scheme) + if err != nil { + return reconcile.Result{}, err + } + if setRestrictedAccess { + routes[idx].Annotations = maputils.Append(routes[idx].Annotations, constants.DevWorkspaceRestrictedAccessAnnotation, restrictedAccess) + } + } + + servicesInSync, clusterServices, err := r.syncServices(instance, services) + if err != nil { + reqLogger.Error(err, "Error syncing services") + return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing services") + } else if 
!servicesInSync { + reqLogger.Info("Services not in sync") + return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing services") + } + + clusterRoutingObj := solvers.RoutingObjects{ + Services: clusterServices, + } + + if infrastructure.IsOpenShift() { + routesInSync, clusterRoutes, err := r.syncRoutes(instance, routes) + if err != nil { + reqLogger.Error(err, "Error syncing routes") + return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing routes") + } else if !routesInSync { + reqLogger.Info("Routes not in sync") + return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing routes") + } + clusterRoutingObj.Routes = clusterRoutes + } else { + ingressesInSync, clusterIngresses, err := r.syncIngresses(instance, ingresses) + if err != nil { + reqLogger.Error(err, "Error syncing ingresses") + return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing ingresses") + } else if !ingressesInSync { + reqLogger.Info("Ingresses not in sync") + return reconcile.Result{Requeue: true}, r.reconcileStatus(instance, nil, nil, false, "Preparing ingresses") + } + clusterRoutingObj.Ingresses = clusterIngresses + } + + exposedEndpoints, endpointsAreReady, err := solver.GetExposedEndpoints(instance.Spec.Endpoints, clusterRoutingObj) + if err != nil { + reqLogger.Error(err, "Could not get exposed endpoints for devworkspace") + return reconcile.Result{}, r.markRoutingFailed(instance, fmt.Sprintf("Could not get exposed endpoints for DevWorkspace: %s", err)) + } + + return reconcile.Result{}, r.reconcileStatus(instance, &routingObjects, exposedEndpoints, endpointsAreReady, "") +} + +// setFinalizer ensures a finalizer is set on a devWorkspaceRouting instance; no-op if finalizer is already present. 
+func (r *DevWorkspaceRoutingReconciler) setFinalizer(reqLogger logr.Logger, solver solvers.RoutingSolver, m *controllerv1alpha1.DevWorkspaceRouting) error { + if !solver.FinalizerRequired(m) || contains(m.GetFinalizers(), devWorkspaceRoutingFinalizer) { + return nil + } + + reqLogger.Info("Adding Finalizer for the DevWorkspaceRouting") + m.SetFinalizers(append(m.GetFinalizers(), devWorkspaceRoutingFinalizer)) + + // Update CR + err := r.Update(context.TODO(), m) + if err != nil { + reqLogger.Error(err, "Failed to update DevWorkspaceRouting with finalizer") + return err + } + return nil +} + +func (r *DevWorkspaceRoutingReconciler) finalize(solver solvers.RoutingSolver, instance *controllerv1alpha1.DevWorkspaceRouting) error { + if contains(instance.GetFinalizers(), devWorkspaceRoutingFinalizer) { + // let the solver finalize its stuff + err := solver.Finalize(instance) + if err != nil { + return err + } + + // Remove devWorkspaceRoutingFinalizer. Once all finalizers have been + // removed, the object will be deleted. 
+ instance.SetFinalizers(remove(instance.GetFinalizers(), devWorkspaceRoutingFinalizer)) + err = r.Update(context.TODO(), instance) + if err != nil { + return err + } + } + return nil +} + +func (r *DevWorkspaceRoutingReconciler) markRoutingFailed(instance *controllerv1alpha1.DevWorkspaceRouting, message string) error { + instance.Status.Message = message + instance.Status.Phase = controllerv1alpha1.RoutingFailed + return r.Status().Update(context.TODO(), instance) +} + +func (r *DevWorkspaceRoutingReconciler) reconcileStatus( + instance *controllerv1alpha1.DevWorkspaceRouting, + routingObjects *solvers.RoutingObjects, + exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, + endpointsReady bool, + message string) error { + + if !endpointsReady { + instance.Status.Phase = controllerv1alpha1.RoutingPreparing + instance.Status.Message = message + return r.Status().Update(context.TODO(), instance) + } + if instance.Status.Phase == controllerv1alpha1.RoutingReady && + cmp.Equal(instance.Status.PodAdditions, routingObjects.PodAdditions) && + cmp.Equal(instance.Status.ExposedEndpoints, exposedEndpoints) { + return nil + } + instance.Status.Phase = controllerv1alpha1.RoutingReady + instance.Status.Message = "DevWorkspaceRouting prepared" + instance.Status.PodAdditions = routingObjects.PodAdditions + instance.Status.ExposedEndpoints = exposedEndpoints + return r.Status().Update(context.TODO(), instance) +} + +func contains(list []string, s string) bool { + for _, v := range list { + if v == s { + return true + } + } + return false +} + +func remove(list []string, s string) []string { + for i, v := range list { + if v == s { + list = append(list[:i], list[i+1:]...) + } + } + return list +} + +func (r *DevWorkspaceRoutingReconciler) SetupWithManager(mgr ctrl.Manager) error { + maxConcurrentReconciles, err := config.GetMaxConcurrentReconciles() + if err != nil { + return err + } + + bld := ctrl.NewControllerManagedBy(mgr). 
+ WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). + For(&controllerv1alpha1.DevWorkspaceRouting{}). + Owns(&corev1.Service{}). + Owns(&v1beta1.Ingress{}) + if infrastructure.IsOpenShift() { + bld.Owns(&routeV1.Route{}) + } + if r.SolverGetter == nil { + return NoSolversEnabled + } + + if err := r.SolverGetter.SetupControllerManager(bld); err != nil { + return err + } + + bld.WithEventFilter(getRoutingPredicatesForSolverFunc(r.SolverGetter)) + + return bld.Complete(r) +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/predicates.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/predicates.go new file mode 100644 index 000000000..382bf1e86 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/predicates.go @@ -0,0 +1,69 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +package devworkspacerouting + +import ( + "github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +func getRoutingPredicatesForSolverFunc(solverGetter solvers.RoutingSolverGetter) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(ev event.CreateEvent) bool { + obj, ok := ev.Object.(*controllerv1alpha1.DevWorkspaceRouting) + if !ok { + // If object is not a DevWorkspaceRouting, it must be a service/ingress/route related to the workspace + // The safe choice here is to trigger a reconcile to ensure that all resources are in sync; it's the job + // of the controller to ignore DevWorkspaceRoutings for other routing classes. + return true + } + if !solverGetter.HasSolver(obj.Spec.RoutingClass) { + return false + } + return true + }, + DeleteFunc: func(_ event.DeleteEvent) bool { + // Return true to ensure objects are recreated if needed, and that finalizers are + // removed on deletion. + return true + }, + UpdateFunc: func(ev event.UpdateEvent) bool { + newObj, ok := ev.ObjectNew.(*controllerv1alpha1.DevWorkspaceRouting) + if !ok { + // If object is not a DevWorkspaceRouting, it must be a service/ingress/route related to the workspace + // The safe choice here is to trigger a reconcile to ensure that all resources are in sync; it's the job + // of the controller to ignore DevWorkspaceRoutings for other routing classes. + return true + } + if !solverGetter.HasSolver(newObj.Spec.RoutingClass) { + // Future improvement: handle case where old object has a supported routingClass and new object does not + // to allow for cleanup when routingClass is switched. 
+ return false + } + return true + }, + GenericFunc: func(ev event.GenericEvent) bool { + obj, ok := ev.Object.(*controllerv1alpha1.DevWorkspaceRouting) + if !ok { + return true + } + if !solverGetter.HasSolver(obj.Spec.RoutingClass) { + return false + } + return true + }, + } +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/basic_solver.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/basic_solver.go new file mode 100644 index 000000000..aee9a6140 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/basic_solver.go @@ -0,0 +1,81 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +package solvers + +import ( + "errors" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/config" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" +) + +var routeAnnotations = func(endpointName string) map[string]string { + return map[string]string{ + "haproxy.router.openshift.io/rewrite-target": "/", + constants.DevWorkspaceEndpointNameAnnotation: endpointName, + } +} + +var nginxIngressAnnotations = func(endpointName string) map[string]string { + return map[string]string{ + "kubernetes.io/ingress.class": "nginx", + "nginx.ingress.kubernetes.io/rewrite-target": "/", + "nginx.ingress.kubernetes.io/ssl-redirect": "false", + constants.DevWorkspaceEndpointNameAnnotation: endpointName, + } +} + +// Basic solver exposes endpoints without any authentication +// According to the current cluster there is different behavior: +// Kubernetes: use Ingresses without TLS +// OpenShift: use Routes with TLS enabled +type BasicSolver struct{} + +var _ RoutingSolver = (*BasicSolver)(nil) + +func (s *BasicSolver) FinalizerRequired(*controllerv1alpha1.DevWorkspaceRouting) bool { + return false +} + +func (s *BasicSolver) Finalize(*controllerv1alpha1.DevWorkspaceRouting) error { + return nil +} + +func (s *BasicSolver) GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta DevWorkspaceMetadata) (RoutingObjects, error) { + routingObjects := RoutingObjects{} + + routingSuffix := config.ControllerCfg.GetProperty(config.RoutingSuffix) + if routingSuffix == nil { + return routingObjects, errors.New(config.RoutingSuffix + " must be set for basic routing") + } + + spec := routing.Spec + services := getServicesForEndpoints(spec.Endpoints, workspaceMeta) + services = append(services, GetDiscoverableServicesForEndpoints(spec.Endpoints, workspaceMeta)...) 
+ routingObjects.Services = services + if infrastructure.IsOpenShift() { + routingObjects.Routes = getRoutesForSpec(*routingSuffix, spec.Endpoints, workspaceMeta) + } else { + routingObjects.Ingresses = getIngressesForSpec(*routingSuffix, spec.Endpoints, workspaceMeta) + } + + return routingObjects, nil +} + +func (s *BasicSolver) GetExposedEndpoints( + endpoints map[string]controllerv1alpha1.EndpointList, + routingObj RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) { + return getExposedEndpoints(endpoints, routingObj) +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/cluster_solver.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/cluster_solver.go new file mode 100644 index 000000000..ac8aeb877 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/cluster_solver.go @@ -0,0 +1,126 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +package solvers + +import ( + "fmt" + + "github.com/devfile/devworkspace-operator/pkg/common" + "github.com/devfile/devworkspace-operator/pkg/constants" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + corev1 "k8s.io/api/core/v1" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" +) + +const ( + serviceServingCertAnnot = "service.beta.openshift.io/serving-cert-secret-name" +) + +type ClusterSolver struct { + TLS bool +} + +var _ RoutingSolver = (*ClusterSolver)(nil) + +func (s *ClusterSolver) FinalizerRequired(*controllerv1alpha1.DevWorkspaceRouting) bool { + return false +} + +func (s *ClusterSolver) Finalize(*controllerv1alpha1.DevWorkspaceRouting) error { + return nil +} + +func (s *ClusterSolver) GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta DevWorkspaceMetadata) (RoutingObjects, error) { + spec := routing.Spec + services := getServicesForEndpoints(spec.Endpoints, workspaceMeta) + podAdditions := &controllerv1alpha1.PodAdditions{} + if s.TLS { + readOnlyMode := int32(420) + for idx, service := range services { + if services[idx].Annotations == nil { + services[idx].Annotations = map[string]string{} + } + services[idx].Annotations[serviceServingCertAnnot] = service.Name + podAdditions.Volumes = append(podAdditions.Volumes, corev1.Volume{ + Name: common.ServingCertVolumeName(service.Name), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: service.Name, + DefaultMode: &readOnlyMode, + }, + }, + }) + podAdditions.VolumeMounts = append(podAdditions.VolumeMounts, corev1.VolumeMount{ + Name: common.ServingCertVolumeName(service.Name), + ReadOnly: true, + MountPath: "/var/serving-cert/", + }) + } + } + + return RoutingObjects{ + Services: services, + PodAdditions: podAdditions, + }, nil +} + +func (s *ClusterSolver) GetExposedEndpoints( + endpoints map[string]controllerv1alpha1.EndpointList, + routingObj 
RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) { + + exposedEndpoints = map[string]controllerv1alpha1.ExposedEndpointList{} + + for machineName, machineEndpoints := range endpoints { + for _, endpoint := range machineEndpoints { + if endpoint.Exposure == dw.NoneEndpointExposure { + continue + } + url, err := resolveServiceHostnameForEndpoint(endpoint, routingObj.Services) + if err != nil { + return nil, false, err + } + + exposedEndpoints[machineName] = append(exposedEndpoints[machineName], controllerv1alpha1.ExposedEndpoint{ + Name: endpoint.Name, + Url: url, + Attributes: endpoint.Attributes, + }) + } + } + + return exposedEndpoints, true, nil +} + +func resolveServiceHostnameForEndpoint(endpoint dw.Endpoint, services []corev1.Service) (string, error) { + for _, service := range services { + if service.Annotations[constants.DevWorkspaceDiscoverableServiceAnnotation] == "true" { + continue + } + for _, servicePort := range service.Spec.Ports { + if servicePort.Port == int32(endpoint.TargetPort) { + return getHostnameFromService(service, servicePort.Port), nil + } + } + } + return "", fmt.Errorf("could not find service for endpoint %s", endpoint.Name) +} + +func getHostnameFromService(service corev1.Service, port int32) string { + scheme := "http" + if _, ok := service.Annotations[serviceServingCertAnnot]; ok { + scheme = "https" + } + return fmt.Sprintf("%s://%s.%s.svc:%d", scheme, service.Name, service.Namespace, port) +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/common.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/common.go new file mode 100644 index 000000000..22c8dac77 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/common.go @@ -0,0 +1,247 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. 
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package solvers + +import ( + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/common" + "github.com/devfile/devworkspace-operator/pkg/constants" + + routeV1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +type DevWorkspaceMetadata struct { + DevWorkspaceId string + Namespace string + PodSelector map[string]string +} + +// GetDiscoverableServicesForEndpoints converts the endpoint list into a set of services, each corresponding to a single discoverable +// endpoint from the list. Endpoints with the NoneEndpointExposure are ignored. 
+func GetDiscoverableServicesForEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []corev1.Service { + var services []corev1.Service + for _, machineEndpoints := range endpoints { + for _, endpoint := range machineEndpoints { + if endpoint.Exposure == dw.NoneEndpointExposure { + continue + } + + if endpoint.Attributes.GetBoolean(string(controllerv1alpha1.DiscoverableAttribute), nil) { + // Create service with name matching endpoint + // TODO: This could cause a reconcile conflict if multiple workspaces define the same discoverable endpoint + // Also endpoint names may not be valid as service names + servicePort := corev1.ServicePort{ + Name: common.EndpointName(endpoint.Name), + Protocol: corev1.ProtocolTCP, + Port: int32(endpoint.TargetPort), + TargetPort: intstr.FromInt(endpoint.TargetPort), + } + services = append(services, corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.EndpointName(endpoint.Name), + Namespace: meta.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: meta.DevWorkspaceId, + }, + Annotations: map[string]string{ + constants.DevWorkspaceDiscoverableServiceAnnotation: "true", + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{servicePort}, + Selector: meta.PodSelector, + Type: corev1.ServiceTypeClusterIP, + }, + }) + } + } + } + return services +} + +// GetServiceForEndpoints returns a single service that exposes all endpoints of given exposure types, possibly also including the discoverable types. +// `nil` is returned if the service would expose no ports satisfying the provided criteria. 
+func GetServiceForEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata, includeDiscoverable bool, exposureType ...dw.EndpointExposure) *corev1.Service { + // "set" of ports that are still left for exposure + ports := map[int]bool{} + for _, es := range endpoints { + for _, endpoint := range es { + ports[endpoint.TargetPort] = true + } + } + + // "set" of exposure types that are allowed + validExposures := map[dw.EndpointExposure]bool{} + for _, exp := range exposureType { + validExposures[exp] = true + } + + var exposedPorts []corev1.ServicePort + + for _, es := range endpoints { + for _, endpoint := range es { + if !validExposures[endpoint.Exposure] { + continue + } + + if !includeDiscoverable && endpoint.Attributes.GetBoolean(string(controllerv1alpha1.DiscoverableAttribute), nil) { + continue + } + + if ports[endpoint.TargetPort] { + // make sure we don't mention the same port twice + ports[endpoint.TargetPort] = false + exposedPorts = append(exposedPorts, corev1.ServicePort{ + Name: common.EndpointName(endpoint.Name), + Protocol: corev1.ProtocolTCP, + Port: int32(endpoint.TargetPort), + TargetPort: intstr.FromInt(endpoint.TargetPort), + }) + } + } + } + + if len(exposedPorts) == 0 { + return nil + } + + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.ServiceName(meta.DevWorkspaceId), + Namespace: meta.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: meta.DevWorkspaceId, + }, + }, + Spec: corev1.ServiceSpec{ + Selector: meta.PodSelector, + Type: corev1.ServiceTypeClusterIP, + Ports: exposedPorts, + }, + } +} + +func getServicesForEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []corev1.Service { + if len(endpoints) == 0 { + return nil + } + + service := GetServiceForEndpoints(endpoints, meta, true, dw.PublicEndpointExposure, dw.InternalEndpointExposure) + if service == nil { + return nil + } + + return []corev1.Service{ + *service, + } 
+} + +func getRoutesForSpec(routingSuffix string, endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []routeV1.Route { + var routes []routeV1.Route + for _, machineEndpoints := range endpoints { + for _, endpoint := range machineEndpoints { + if endpoint.Exposure != dw.PublicEndpointExposure { + continue + } + routes = append(routes, getRouteForEndpoint(routingSuffix, endpoint, meta)) + } + } + return routes +} + +func getIngressesForSpec(routingSuffix string, endpoints map[string]controllerv1alpha1.EndpointList, meta DevWorkspaceMetadata) []v1beta1.Ingress { + var ingresses []v1beta1.Ingress + for _, machineEndpoints := range endpoints { + for _, endpoint := range machineEndpoints { + if endpoint.Exposure != dw.PublicEndpointExposure { + continue + } + ingresses = append(ingresses, getIngressForEndpoint(routingSuffix, endpoint, meta)) + } + } + return ingresses +} + +func getRouteForEndpoint(routingSuffix string, endpoint dw.Endpoint, meta DevWorkspaceMetadata) routeV1.Route { + targetEndpoint := intstr.FromInt(endpoint.TargetPort) + endpointName := common.EndpointName(endpoint.Name) + return routeV1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.RouteName(meta.DevWorkspaceId, endpointName), + Namespace: meta.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: meta.DevWorkspaceId, + }, + Annotations: routeAnnotations(endpointName), + }, + Spec: routeV1.RouteSpec{ + Host: common.WorkspaceHostname(routingSuffix, meta.DevWorkspaceId), + Path: common.EndpointPath(endpointName), + TLS: &routeV1.TLSConfig{ + InsecureEdgeTerminationPolicy: routeV1.InsecureEdgeTerminationPolicyRedirect, + Termination: routeV1.TLSTerminationEdge, + }, + To: routeV1.RouteTargetReference{ + Kind: "Service", + Name: common.ServiceName(meta.DevWorkspaceId), + }, + Port: &routeV1.RoutePort{ + TargetPort: targetEndpoint, + }, + }, + } +} + +func getIngressForEndpoint(routingSuffix string, endpoint dw.Endpoint, meta DevWorkspaceMetadata) 
v1beta1.Ingress { + targetEndpoint := intstr.FromInt(endpoint.TargetPort) + endpointName := common.EndpointName(endpoint.Name) + hostname := common.EndpointHostname(routingSuffix, meta.DevWorkspaceId, endpointName, endpoint.TargetPort) + ingressPathType := v1beta1.PathTypeImplementationSpecific + return v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: common.RouteName(meta.DevWorkspaceId, endpointName), + Namespace: meta.Namespace, + Labels: map[string]string{ + constants.DevWorkspaceIDLabel: meta.DevWorkspaceId, + }, + Annotations: nginxIngressAnnotations(endpoint.Name), + }, + Spec: v1beta1.IngressSpec{ + Rules: []v1beta1.IngressRule{ + { + Host: hostname, + IngressRuleValue: v1beta1.IngressRuleValue{ + HTTP: &v1beta1.HTTPIngressRuleValue{ + Paths: []v1beta1.HTTPIngressPath{ + { + Backend: v1beta1.IngressBackend{ + ServiceName: common.ServiceName(meta.DevWorkspaceId), + ServicePort: targetEndpoint, + }, + PathType: &ingressPathType, + Path: "/", + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/errors.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/errors.go new file mode 100644 index 000000000..1fbe45d6a --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/errors.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation
+//
+
+package solvers
+
+import (
+	"errors"
+	"time"
+)
+
+var _ error = (*RoutingNotReady)(nil)
+var _ error = (*RoutingInvalid)(nil)
+
+// RoutingNotSupported is used by the solvers when they do not support the routingclass of the workspace they've been asked to route
+var RoutingNotSupported = errors.New("routingclass not supported by this controller")
+
+// RoutingNotReady is used by the solvers when they are not ready to route an otherwise OK workspace. They can also suggest the
+// duration after which to retry the workspace routing. If not specified, the retry is made after 1 second.
+type RoutingNotReady struct {
+	Retry time.Duration
+}
+
+func (*RoutingNotReady) Error() string {
+	return "controller not ready to resolve the workspace routing"
+}
+
+// RoutingInvalid is used by the solvers to report that they were asked to route a workspace that has the correct routingclass but
+// is invalid in some other sense - missing configuration, etc.
+type RoutingInvalid struct {
+	Reason string
+}
+
+func (e *RoutingInvalid) Error() string {
+	reason := ""
+	if len(e.Reason) > 0 {
+		reason = e.Reason
+	}
+	return "workspace routing is invalid: " + reason
+}
diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/resolve_endpoints.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/resolve_endpoints.go
new file mode 100644
index 000000000..3752d7d1e
--- /dev/null
+++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/resolve_endpoints.go
@@ -0,0 +1,107 @@
+//
+// Copyright (c) 2019-2021 Red Hat, Inc.
+// This program and the accompanying materials are made
+// available under the terms of the Eclipse Public License 2.0
+// which is available at https://www.eclipse.org/legal/epl-2.0/
+//
+// SPDX-License-Identifier: EPL-2.0
+//
+// Contributors:
+//   Red Hat, Inc. 
- initial API and implementation +// + +package solvers + +import ( + "fmt" + "net/url" + "strings" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/constants" +) + +func getExposedEndpoints( + endpoints map[string]controllerv1alpha1.EndpointList, + routingObj RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) { + + exposedEndpoints = map[string]controllerv1alpha1.ExposedEndpointList{} + ready = true + + for machineName, machineEndpoints := range endpoints { + for _, endpoint := range machineEndpoints { + if endpoint.Exposure != dw.PublicEndpointExposure { + continue + } + endpointUrl, err := resolveURLForEndpoint(endpoint, routingObj) + if err != nil { + return nil, false, err + } + if endpointUrl == "" { + ready = false + } + exposedEndpoints[machineName] = append(exposedEndpoints[machineName], controllerv1alpha1.ExposedEndpoint{ + Name: endpoint.Name, + Url: endpointUrl, + Attributes: endpoint.Attributes, + }) + } + } + return exposedEndpoints, ready, nil +} + +func resolveURLForEndpoint( + endpoint dw.Endpoint, + routingObj RoutingObjects) (string, error) { + for _, route := range routingObj.Routes { + if route.Annotations[constants.DevWorkspaceEndpointNameAnnotation] == endpoint.Name { + return getURLForEndpoint(endpoint, route.Spec.Host, route.Spec.Path, route.Spec.TLS != nil), nil + } + } + for _, ingress := range routingObj.Ingresses { + if ingress.Annotations[constants.DevWorkspaceEndpointNameAnnotation] == endpoint.Name { + if len(ingress.Spec.Rules) == 1 { + return getURLForEndpoint(endpoint, ingress.Spec.Rules[0].Host, "", false), nil // no TLS supported for ingresses yet + } else { + return "", fmt.Errorf("ingress %s contains multiple rules", ingress.Name) + } + } + } + return "", fmt.Errorf("could not find ingress/route for endpoint '%s'", 
endpoint.Name)
+}
+
+func getURLForEndpoint(endpoint dw.Endpoint, host, basePath string, secure bool) string {
+	protocol := endpoint.Protocol
+	if secure && endpoint.Secure {
+		protocol = dw.EndpointProtocol(getSecureProtocol(string(protocol)))
+	}
+	var p string
+	if endpoint.Path != "" {
+		// the only one slash should be between these path segments.
+		// Path.join does not suite here since it eats trailing slash which may be critical for the application
+		p = fmt.Sprintf("%s/%s", strings.TrimRight(basePath, "/"), strings.TrimLeft(endpoint.Path, "/"))
+	} else {
+		p = basePath
+	}
+	u := url.URL{
+		Scheme: string(protocol),
+		Host:   host,
+		Path:   p,
+	}
+	return u.String()
+}
+
+// getSecureProtocol takes a (potentially unsecure protocol e.g. http) and returns the secure version (e.g. https).
+// If protocol isn't recognized, it is returned unmodified.
+func getSecureProtocol(protocol string) string {
+	switch protocol {
+	case "ws":
+		return "wss"
+	case "http":
+		return "https"
+	default:
+		return protocol
+	}
+}
diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/solver.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/solver.go
new file mode 100644
index 000000000..9e9de2476
--- /dev/null
+++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers/solver.go
@@ -0,0 +1,119 @@
+//
+// Copyright (c) 2019-2021 Red Hat, Inc.
+// This program and the accompanying materials are made
+// available under the terms of the Eclipse Public License 2.0
+// which is available at https://www.eclipse.org/legal/epl-2.0/
+//
+// SPDX-License-Identifier: EPL-2.0
+//
+// Contributors:
+//   Red Hat, Inc. 
- initial API and implementation +// + +package solvers + +import ( + "fmt" + + routeV1 "github.com/openshift/api/route/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" +) + +type RoutingObjects struct { + Services []v1.Service + Ingresses []v1beta1.Ingress + Routes []routeV1.Route + PodAdditions *controllerv1alpha1.PodAdditions +} + +type RoutingSolver interface { + // FinalizerRequired tells the caller if the solver requires a finalizer on the routing object. + FinalizerRequired(routing *controllerv1alpha1.DevWorkspaceRouting) bool + + // Finalize implements the custom finalization logic required by the solver. The solver doesn't have to + // remove any finalizer from the finalizer list on the routing. Instead just implement the custom + // logic required for the finalization itself. If this method doesn't return any error, the finalizer + // is automatically removed from the routing. + Finalize(routing *controllerv1alpha1.DevWorkspaceRouting) error + + // GetSpecObjects constructs cluster routing objects which should be applied on the cluster + // This method should return RoutingNotReady error if the solver is not ready yet to process + // the workspace routing, RoutingInvalid error if there is a specific reason for the failure or + // any other error. + // The implementors can also create any additional objects not captured by the RoutingObjects struct. If that's + // the case they are required to set the restricted access annotation on any objects created according to the + // restricted access specified by the routing. 
+ GetSpecObjects(routing *controllerv1alpha1.DevWorkspaceRouting, workspaceMeta DevWorkspaceMetadata) (RoutingObjects, error) + + // GetExposedEndpoints retreives the URL for each endpoint in a devfile spec from a set of RoutingObjects. + // Returns is a map from component ids (as defined in the devfile) to the list of endpoints for that component + // Return value "ready" specifies if all endpoints are resolved on the cluster; if false it is necessary to retry, as + // URLs will be undefined. + GetExposedEndpoints(endpoints map[string]controllerv1alpha1.EndpointList, routingObj RoutingObjects) (exposedEndpoints map[string]controllerv1alpha1.ExposedEndpointList, ready bool, err error) +} + +type RoutingSolverGetter interface { + // SetupControllerManager is called during the setup of the controller and can modify the controller manager with additional + // watches, etc., needed for the correct operation of the solver. + SetupControllerManager(mgr *builder.Builder) error + + // HasSolver returns whether the provided routingClass is supported by this RoutingSolverGetter. Returns false if + // calling GetSolver with routingClass will return a RoutingNotSupported error. Can be used to check if a routingClass + // is supported without having to provide a runtime client. Note that GetSolver may still return another error, if e.g. + // an OpenShift-only routingClass is used on a vanilla Kubernetes platform. + HasSolver(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool + + // GetSolver that obtains a Solver (see github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers) + // for a particular DevWorkspaceRouting instance. This function should return a RoutingNotSupported error if + // the routingClass is not recognized, and any other error if the routingClass is invalid (e.g. an OpenShift-only + // routingClass on a vanilla Kubernetes platform). 
Note that an empty routingClass is handled by the DevWorkspace controller itself, + // and should not be handled by external controllers. + GetSolver(client client.Client, routingClass controllerv1alpha1.DevWorkspaceRoutingClass) (solver RoutingSolver, err error) +} + +type SolverGetter struct{} + +var _ RoutingSolverGetter = (*SolverGetter)(nil) + +func (_ *SolverGetter) HasSolver(routingClass controllerv1alpha1.DevWorkspaceRoutingClass) bool { + if routingClass == "" { + // Special case for built-in: empty routing class returns the default solver for the DevWorkspace controller. + return true + } + switch routingClass { + case controllerv1alpha1.DevWorkspaceRoutingBasic, + controllerv1alpha1.DevWorkspaceRoutingCluster, + controllerv1alpha1.DevWorkspaceRoutingClusterTLS, + controllerv1alpha1.DevWorkspaceRoutingWebTerminal: + return true + default: + return false + } +} + +func (_ *SolverGetter) GetSolver(_ client.Client, routingClass controllerv1alpha1.DevWorkspaceRoutingClass) (RoutingSolver, error) { + isOpenShift := infrastructure.IsOpenShift() + switch routingClass { + case controllerv1alpha1.DevWorkspaceRoutingBasic: + return &BasicSolver{}, nil + case controllerv1alpha1.DevWorkspaceRoutingCluster: + return &ClusterSolver{}, nil + case controllerv1alpha1.DevWorkspaceRoutingClusterTLS, controllerv1alpha1.DevWorkspaceRoutingWebTerminal: + if !isOpenShift { + return nil, fmt.Errorf("routing class %s only supported on OpenShift", routingClass) + } + return &ClusterSolver{TLS: true}, nil + default: + return nil, RoutingNotSupported + } +} + +func (*SolverGetter) SetupControllerManager(*builder.Builder) error { + return nil +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_ingresses.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_ingresses.go new file mode 100644 index 000000000..41e4d7337 --- /dev/null +++ 
b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_ingresses.go @@ -0,0 +1,111 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package devworkspacerouting + +import ( + "context" + "fmt" + + "github.com/devfile/devworkspace-operator/pkg/constants" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" +) + +var ingressDiffOpts = cmp.Options{ + cmpopts.IgnoreFields(v1beta1.Ingress{}, "TypeMeta", "ObjectMeta", "Status"), + cmpopts.IgnoreFields(v1beta1.HTTPIngressPath{}, "PathType"), +} + +func (r *DevWorkspaceRoutingReconciler) syncIngresses(routing *controllerv1alpha1.DevWorkspaceRouting, specIngresses []v1beta1.Ingress) (ok bool, clusterIngresses []v1beta1.Ingress, err error) { + ingressesInSync := true + + clusterIngresses, err = r.getClusterIngresses(routing) + if err != nil { + return false, nil, err + } + + toDelete := getIngressesToDelete(clusterIngresses, specIngresses) + for _, ingress := range toDelete { + err := r.Delete(context.TODO(), &ingress) + if err != nil { + return false, nil, err + } + ingressesInSync = false + } + + for _, specIngress := range specIngresses { + if contains, idx := listContainsIngressByName(specIngress, clusterIngresses); contains { + clusterIngress := clusterIngresses[idx] + if !cmp.Equal(specIngress, clusterIngress, ingressDiffOpts) { + // Update ingress's spec + clusterIngress.Spec = specIngress.Spec + err := 
r.Update(context.TODO(), &clusterIngress) + if err != nil && !errors.IsConflict(err) { + return false, nil, err + } + ingressesInSync = false + } + } else { + err := r.Create(context.TODO(), &specIngress) + if err != nil { + return false, nil, err + } + ingressesInSync = false + } + } + + return ingressesInSync, clusterIngresses, nil +} + +func (r *DevWorkspaceRoutingReconciler) getClusterIngresses(routing *controllerv1alpha1.DevWorkspaceRouting) ([]v1beta1.Ingress, error) { + found := &v1beta1.IngressList{} + labelSelector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId)) + if err != nil { + return nil, err + } + listOptions := &client.ListOptions{ + Namespace: routing.Namespace, + LabelSelector: labelSelector, + } + err = r.List(context.TODO(), found, listOptions) + if err != nil { + return nil, err + } + return found.Items, nil +} + +func getIngressesToDelete(clusterIngresses, specIngresses []v1beta1.Ingress) []v1beta1.Ingress { + var toDelete []v1beta1.Ingress + for _, clusterIngress := range clusterIngresses { + if contains, _ := listContainsIngressByName(clusterIngress, specIngresses); !contains { + toDelete = append(toDelete, clusterIngress) + } + } + return toDelete +} + +func listContainsIngressByName(query v1beta1.Ingress, list []v1beta1.Ingress) (exists bool, idx int) { + for idx, listIngress := range list { + if query.Name == listIngress.Name { + return true, idx + } + } + return false, -1 +} diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_routes.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_routes.go new file mode 100644 index 000000000..3f539191f --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_routes.go @@ -0,0 +1,124 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. 
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package devworkspacerouting + +import ( + "context" + "fmt" + + "github.com/devfile/devworkspace-operator/pkg/constants" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + routeV1 "github.com/openshift/api/route/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" +) + +var routeDiffOpts = cmp.Options{ + cmpopts.IgnoreFields(routeV1.Route{}, "TypeMeta", "ObjectMeta", "Status"), + cmpopts.IgnoreFields(routeV1.RouteSpec{}, "WildcardPolicy", "Host"), + cmpopts.IgnoreFields(routeV1.RouteTargetReference{}, "Weight"), +} + +func (r *DevWorkspaceRoutingReconciler) syncRoutes(routing *controllerv1alpha1.DevWorkspaceRouting, specRoutes []routeV1.Route) (ok bool, clusterRoutes []routeV1.Route, err error) { + routesInSync := true + + clusterRoutes, err = r.getClusterRoutes(routing) + if err != nil { + return false, nil, err + } + + toDelete := getRoutesToDelete(clusterRoutes, specRoutes) + for _, route := range toDelete { + err := r.Delete(context.TODO(), &route) + if err != nil { + return false, nil, err + } + routesInSync = false + } + + for _, specRoute := range specRoutes { + if contains, idx := listContainsRouteByName(specRoute, clusterRoutes); contains { + clusterRoute := clusterRoutes[idx] + if !cmp.Equal(specRoute, clusterRoute, routeDiffOpts) { + // Update route's spec + clusterRoute.Spec = specRoute.Spec + err := r.Update(context.TODO(), &clusterRoute) + if err != nil && !errors.IsConflict(err) { + return false, nil, err + } + + routesInSync = false + } + } else { + err := 
r.Create(context.TODO(), &specRoute)
+			if err != nil {
+				return false, nil, err
+			}
+			routesInSync = false
+		}
+	}
+
+	return routesInSync, clusterRoutes, nil
+}
+
+func (r *DevWorkspaceRoutingReconciler) getClusterRoutes(routing *controllerv1alpha1.DevWorkspaceRouting) ([]routeV1.Route, error) {
+	found := &routeV1.RouteList{}
+	labelSelector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId))
+	if err != nil {
+		return nil, err
+	}
+	listOptions := &client.ListOptions{
+		Namespace:     routing.Namespace,
+		LabelSelector: labelSelector,
+	}
+	err = r.List(context.TODO(), found, listOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	var routes []routeV1.Route
+	for _, route := range found.Items {
+		ingressCreated := false // We need to filter routes that are created automatically for ingresses on OpenShift
+		for _, ownerref := range route.OwnerReferences {
+			ingressCreated = ingressCreated || ownerref.Kind == "Ingress"
+		}
+		if !ingressCreated {
+			routes = append(routes, route)
+		}
+	}
+	return routes, nil
+}
+
+func getRoutesToDelete(clusterRoutes, specRoutes []routeV1.Route) []routeV1.Route {
+	var toDelete []routeV1.Route
+	for _, clusterRoute := range clusterRoutes {
+		if contains, _ := listContainsRouteByName(clusterRoute, specRoutes); !contains {
+			toDelete = append(toDelete, clusterRoute)
+		}
+	}
+	return toDelete
+}
+
+func listContainsRouteByName(query routeV1.Route, list []routeV1.Route) (exists bool, idx int) {
+	for idx, listRoute := range list {
+		if query.Name == listRoute.Name {
+			return true, idx
+		}
+	}
+	return false, -1
+}
diff --git a/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_services.go b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_services.go
new file mode 100644
index 000000000..c72b0631c
--- /dev/null
+++ b/vendor/github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/sync_services.go
@@ -0,0 +1,118 @@
+//
+// Copyright (c) 
2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package devworkspacerouting + +import ( + "context" + "fmt" + "strings" + + "github.com/devfile/devworkspace-operator/pkg/constants" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1" +) + +var serviceDiffOpts = cmp.Options{ + cmpopts.IgnoreFields(corev1.Service{}, "TypeMeta", "ObjectMeta", "Status"), + cmpopts.IgnoreFields(corev1.ServiceSpec{}, "ClusterIP", "ClusterIPs", "IPFamilies", "IPFamilyPolicy", "SessionAffinity"), + cmpopts.IgnoreFields(corev1.ServicePort{}, "TargetPort"), + cmpopts.SortSlices(func(a, b corev1.ServicePort) bool { + return strings.Compare(a.Name, b.Name) > 0 + }), +} + +func (r *DevWorkspaceRoutingReconciler) syncServices(routing *controllerv1alpha1.DevWorkspaceRouting, specServices []corev1.Service) (ok bool, clusterServices []corev1.Service, err error) { + servicesInSync := true + + clusterServices, err = r.getClusterServices(routing) + if err != nil { + return false, nil, err + } + + toDelete := getServicesToDelete(clusterServices, specServices) + for _, service := range toDelete { + err := r.Delete(context.TODO(), &service) + if err != nil { + return false, nil, err + } + servicesInSync = false + } + + for _, specService := range specServices { + if contains, idx := listContainsByName(specService, clusterServices); contains { + clusterService := clusterServices[idx] + if !cmp.Equal(specService, clusterService, serviceDiffOpts) { + // Cannot naively 
copy spec, as clusterIP is unmodifiable + clusterIP := clusterService.Spec.ClusterIP + clusterService.Spec = specService.Spec + clusterService.Spec.ClusterIP = clusterIP + err := r.Update(context.TODO(), &clusterService) + if err != nil && !errors.IsConflict(err) { + return false, nil, err + } + servicesInSync = false + } + } else { + err := r.Create(context.TODO(), &specService) + if err != nil { + return false, nil, err + } + servicesInSync = false + } + } + + return servicesInSync, clusterServices, nil +} + +func (r *DevWorkspaceRoutingReconciler) getClusterServices(routing *controllerv1alpha1.DevWorkspaceRouting) ([]corev1.Service, error) { + found := &corev1.ServiceList{} + labelSelector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId)) + if err != nil { + return nil, err + } + listOptions := &client.ListOptions{ + Namespace: routing.Namespace, + LabelSelector: labelSelector, + } + err = r.List(context.TODO(), found, listOptions) + if err != nil { + return nil, err + } + return found.Items, nil +} + +func getServicesToDelete(clusterServices, specServices []corev1.Service) []corev1.Service { + var toDelete []corev1.Service + for _, clusterService := range clusterServices { + if contains, _ := listContainsByName(clusterService, specServices); !contains { + toDelete = append(toDelete, clusterService) + } + } + return toDelete +} + +func listContainsByName(query corev1.Service, list []corev1.Service) (exists bool, idx int) { + for idx, listService := range list { + if query.Name == listService.Name { + return true, idx + } + } + return false, -1 +} diff --git a/vendor/github.com/devfile/devworkspace-operator/internal/images/image.go b/vendor/github.com/devfile/devworkspace-operator/internal/images/image.go new file mode 100644 index 000000000..26728b76f --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/internal/images/image.go @@ -0,0 +1,152 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. 
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +// Package images is intended to support deploying the operator on restricted networks. It contains +// utilities for translating images referenced by environment variables to regular image references, +// allowing images that are defined by a tag to be replaced by digests automatically. This allows all +// images used by the controller to be defined as environment variables on the controller deployment. +// +// All images defined must be referenced by an environment variable of the form RELATED_IMAGE_. +// Functions in this package can be called to replace references to ${RELATED_IMAGE_} with the +// corresponding environment variable. +package images + +import ( + "fmt" + "os" + "regexp" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("container-images") + +var envRegexp = regexp.MustCompile(`\${(RELATED_IMAGE_.*)}`) + +const ( + webTerminalToolingImageEnvVar = "RELATED_IMAGE_web_terminal_tooling" + webhookServerImageEnvVar = "RELATED_IMAGE_devworkspace_webhook_server" + kubeRBACProxyImageEnvVar = "RELATED_IMAGE_kube_rbac_proxy" + pvcCleanupJobImageEnvVar = "RELATED_IMAGE_pvc_cleanup_job" + asyncStorageServerImageEnvVar = "RELATED_IMAGE_async_storage_server" + asyncStorageSidecarImageEnvVar = "RELATED_IMAGE_async_storage_sidecar" + projectCloneImageEnvVar = "RELATED_IMAGE_project_clone" +) + +// GetWebhookServerImage returns the image reference for the webhook server image. 
Returns +// the empty string if environment variable RELATED_IMAGE_devworkspace_webhook_server is not defined +func GetWebhookServerImage() string { + val, ok := os.LookupEnv(webhookServerImageEnvVar) + if !ok { + log.Error(fmt.Errorf("environment variable %s is not set", webhookServerImageEnvVar), "Could not get webhook server image") + return "" + } + return val +} + +// GetKubeRBACProxyImage returns the image reference for the kube RBAC proxy. Returns +// the empty string if environment variable RELATED_IMAGE_kube_rbac_proxy is not defined +func GetKubeRBACProxyImage() string { + val, ok := os.LookupEnv(kubeRBACProxyImageEnvVar) + if !ok { + log.Error(fmt.Errorf("environment variable %s is not set", kubeRBACProxyImageEnvVar), "Could not get webhook server image") + return "" + } + return val +} + +// GetWebTerminalToolingImage returns the image reference for the default web tooling image. Returns +// the empty string if environment variable RELATED_IMAGE_web_terminal_tooling is not defined +func GetWebTerminalToolingImage() string { + val, ok := os.LookupEnv(webTerminalToolingImageEnvVar) + if !ok { + log.Error(fmt.Errorf("environment variable %s is not set", webTerminalToolingImageEnvVar), "Could not get web terminal tooling image") + return "" + } + return val +} + +// GetPVCCleanupJobImage returns the image reference for the PVC cleanup job used to clean workspace +// files from the common PVC in a namespace. 
+func GetPVCCleanupJobImage() string { + val, ok := os.LookupEnv(pvcCleanupJobImageEnvVar) + if !ok { + log.Error(fmt.Errorf("environment variable %s is not set", pvcCleanupJobImageEnvVar), "Could not get PVC cleanup job image") + return "" + } + return val +} + +func GetAsyncStorageServerImage() string { + val, ok := os.LookupEnv(asyncStorageServerImageEnvVar) + if !ok { + log.Error(fmt.Errorf("environment variable %s is not set", asyncStorageServerImageEnvVar), "Could not get async storage server image") + return "" + } + return val +} + +func GetAsyncStorageSidecarImage() string { + val, ok := os.LookupEnv(asyncStorageSidecarImageEnvVar) + if !ok { + log.Error(fmt.Errorf("environment variable %s is not set", asyncStorageSidecarImageEnvVar), "Could not get async storage sidecar image") + return "" + } + return val +} + +func GetProjectClonerImage() string { + val, ok := os.LookupEnv(projectCloneImageEnvVar) + if !ok { + log.Info(fmt.Sprintf("Could not get initial project clone image: environment variable %s is not set", projectCloneImageEnvVar)) + return "" + } + return val +} + +// FillPluginEnvVars replaces plugin devworkspaceTemplate .spec.components[].container.image environment +// variables of the form ${RELATED_IMAGE_*} with values from environment variables with the same name. +// +// Returns error if any referenced environment variable is undefined. 
+func FillPluginEnvVars(pluginDWT *dw.DevWorkspaceTemplate) (*dw.DevWorkspaceTemplate, error) { + for idx, component := range pluginDWT.Spec.Components { + if component.Container == nil { + continue + } + img, err := getImageForEnvVar(component.Container.Image) + if err != nil { + return nil, err + } + pluginDWT.Spec.Components[idx].Container.Image = img + } + return pluginDWT, nil +} + +func isImageEnvVar(query string) bool { + return envRegexp.MatchString(query) +} + +func getImageForEnvVar(envStr string) (string, error) { + if !isImageEnvVar(envStr) { + // Value passed in is not env var, return unmodified + return envStr, nil + } + matches := envRegexp.FindStringSubmatch(envStr) + env := matches[1] + val, ok := os.LookupEnv(env) + if !ok { + log.Info(fmt.Sprintf("Environment variable '%s' is unset. Cannot determine image to use", env)) + return "", fmt.Errorf("environment variable %s is unset", env) + } + return val, nil +} diff --git a/vendor/github.com/devfile/devworkspace-operator/internal/map/map.go b/vendor/github.com/devfile/devworkspace-operator/internal/map/map.go new file mode 100644 index 000000000..83512507a --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/internal/map/map.go @@ -0,0 +1,35 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package maputils + +func Append(target map[string]string, key, value string) map[string]string { + if target == nil { + target = map[string]string{} + } + target[key] = value + return target +} + +// Equal compares string maps for equality, regardless of order. Note that it treats +// a nil map as equal to an empty (but not nil) map. 
// Equal reports whether a and b hold exactly the same key/value pairs,
// regardless of order. Note that it treats a nil map as equal to an empty
// (but not nil) map.
func Equal(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for key, want := range a {
		if got, found := b[key]; !found || got != want {
			return false
		}
	}
	return true
}

// --- vendor/github.com/devfile/devworkspace-operator/pkg/common/naming.go ---
// Helpers deriving deterministic names for the Kubernetes objects that back a
// workspace, from its workspace ID and endpoint names.

// NonAlphaNumRegexp matches any run of characters that is not a lowercase
// ASCII letter or digit.
var NonAlphaNumRegexp = regexp.MustCompile(`[^a-z0-9]+`)

// EndpointName sanitizes an endpoint name into a DNS-friendly token:
// lowercased, non-alphanumeric runs collapsed to single hyphens, and
// leading/trailing hyphens stripped.
func EndpointName(endpointName string) string {
	sanitized := NonAlphaNumRegexp.ReplaceAllString(strings.ToLower(endpointName), "-")
	return strings.Trim(sanitized, "-")
}

// ServiceName returns the name of the Service for the given workspace.
func ServiceName(workspaceId string) string {
	return workspaceId + "-service"
}

// ServiceAccountName returns the name of the ServiceAccount for the given workspace.
func ServiceAccountName(workspaceId string) string {
	return workspaceId + "-sa"
}

// EndpointHostname builds the per-endpoint hostname
// <workspaceId>-<endpointName>-<port>.<routingSuffix>, truncating the first
// label to the 63-character DNS-label limit and dropping a single trailing
// hyphen the cut may expose.
func EndpointHostname(routingSuffix, workspaceId, endpointName string, endpointPort int) string {
	label := fmt.Sprintf("%s-%s-%d", workspaceId, endpointName, endpointPort)
	if len(label) > 63 {
		label = strings.TrimSuffix(label[:63], "-")
	}
	return label + "." + routingSuffix
}

// WorkspaceHostname evaluates a single hostname for a workspace, and should be used for routing
// when endpoints are distinguished by path rules.
func WorkspaceHostname(routingSuffix, workspaceId string) string {
	label := workspaceId
	if len(label) > 63 {
		label = strings.TrimSuffix(label[:63], "-")
	}
	return label + "." + routingSuffix
}

// EndpointPath returns the URL path under which an endpoint is exposed.
func EndpointPath(endpointName string) string {
	return "/" + endpointName + "/"
}

// RouteName returns the name of the route object for a workspace endpoint.
func RouteName(workspaceId, endpointName string) string {
	return workspaceId + "-" + endpointName
}

// DeploymentName returns the name of the Deployment for the given workspace
// (the workspace ID itself).
func DeploymentName(workspaceId string) string {
	return workspaceId
}

// ServingCertVolumeName returns the volume name used for a service's serving certificate.
func ServingCertVolumeName(serviceName string) string {
	return "devworkspace-serving-cert-" + serviceName
}

// PVCCleanupJobName returns the name of the cleanup job for a workspace.
func PVCCleanupJobName(workspaceId string) string {
	return "cleanup-" + workspaceId
}

// MetadataConfigMapName returns the name of the config map holding workspace metadata.
func MetadataConfigMapName(workspaceId string) string {
	return workspaceId + "-metadata"
}

// AutoMountConfigMapVolumeName returns the volume name for an auto-mounted config map.
func AutoMountConfigMapVolumeName(volumeName string) string {
	return "automount-configmap-" + volumeName
}

// AutoMountSecretVolumeName returns the volume name for an auto-mounted secret.
func AutoMountSecretVolumeName(volumeName string) string {
	return "automount-secret-" + volumeName
}
- initial API and implementation +// + +package config + +import ( + "fmt" + + "github.com/devfile/devworkspace-operator/internal/images" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + + "sigs.k8s.io/yaml" +) + +const ( + // property name for value with yaml for default dockerimage component + // that should be provisioned if devfile DOES have redhat-developer/web-terminal plugin + // and DOES NOT have any dockerimage component + defaultTerminalDockerimageProperty = "devworkspace.default_dockerimage.redhat-developer.web-terminal" +) + +func (wc *ControllerConfig) GetDefaultTerminalDockerimage() (*dw.Component, error) { + mountSources := false + defaultContainerYaml := wc.GetProperty(defaultTerminalDockerimageProperty) + if defaultContainerYaml == nil { + webTerminalImage := images.GetWebTerminalToolingImage() + if webTerminalImage == "" { + return nil, fmt.Errorf("cannot determine default image for web terminal: environment variable is unset") + } + defaultTerminalDockerimage := &dw.Component{} + defaultTerminalDockerimage.Name = "dev" + defaultTerminalDockerimage.Container = &dw.ContainerComponent{ + Container: dw.Container{ + Image: webTerminalImage, + Args: []string{"tail", "-f", "/dev/null"}, + MemoryLimit: "256Mi", + MountSources: &mountSources, + Env: []dw.EnvVar{ + { + Name: "PS1", + Value: `\[\e[34m\]>\[\e[m\]\[\e[33m\]>\[\e[m\]`, + }, + }, + // Must be set as it is defaulted in ContainerComponent. Otherwise + // spec and cluster objects will be different. + SourceMapping: "/projects", + }, + } + return defaultTerminalDockerimage, nil + } + + var defaultContainer dw.Component + if err := yaml.Unmarshal([]byte(*defaultContainerYaml), &defaultContainer); err != nil { + return nil, fmt.Errorf( + "%s is configured with invalid container component. 
Error: %s", defaultTerminalDockerimageProperty, err) + } + + return &defaultContainer, nil +} diff --git a/vendor/github.com/devfile/devworkspace-operator/pkg/config/config.go b/vendor/github.com/devfile/devworkspace-operator/pkg/config/config.go new file mode 100644 index 000000000..3bf08428b --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/pkg/config/config.go @@ -0,0 +1,263 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package config + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + + routeV1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var ControllerCfg ControllerConfig +var log = logf.Log.WithName("controller_devworkspace_config") + +const ( + ConfigMapNameEnvVar = "CONTROLLER_CONFIG_MAP_NAME" + ConfigMapNamespaceEnvVar = "CONTROLLER_CONFIG_MAP_NAMESPACE" +) + +var ConfigMapReference = client.ObjectKey{ + Namespace: "", + Name: "devworkspace-controller-configmap", +} + +type ControllerConfig struct { + configMap *corev1.ConfigMap +} + +func (wc *ControllerConfig) update(configMap *corev1.ConfigMap) { + log.Info("Updating the configuration from config map '%s' in namespace '%s'", configMap.Name, configMap.Namespace) + wc.configMap = configMap +} + +func (wc *ControllerConfig) GetWorkspacePVCName() string { + return 
wc.GetPropertyOrDefault(workspacePVCName, defaultWorkspacePVCName) +} + +func (wc *ControllerConfig) GetDefaultRoutingClass() string { + return wc.GetPropertyOrDefault(routingClass, defaultRoutingClass) +} + +//GetExperimentalFeaturesEnabled returns true if experimental features should be enabled. +//DO NOT TURN ON IT IN THE PRODUCTION. +//Experimental features are not well tested and may be totally removed without announcement. +func (wc *ControllerConfig) GetExperimentalFeaturesEnabled() bool { + return wc.GetPropertyOrDefault(experimentalFeaturesEnabled, defaultExperimentalFeaturesEnabled) == "true" +} + +func (wc *ControllerConfig) GetPVCStorageClassName() *string { + return wc.GetProperty(workspacePVCStorageClassName) +} + +func (wc *ControllerConfig) GetSidecarPullPolicy() string { + return wc.GetPropertyOrDefault(sidecarPullPolicy, defaultSidecarPullPolicy) +} + +func (wc *ControllerConfig) GetTlsInsecureSkipVerify() string { + return wc.GetPropertyOrDefault(tlsInsecureSkipVerify, defaultTlsInsecureSkipVerify) +} + +func (wc *ControllerConfig) GetProperty(name string) *string { + val, exists := wc.configMap.Data[name] + if exists { + return &val + } + return nil +} + +func (wc *ControllerConfig) GetPropertyOrDefault(name string, defaultValue string) string { + val, exists := wc.configMap.Data[name] + if exists { + return val + } + return defaultValue +} + +func (wc *ControllerConfig) Validate() error { + return nil +} + +func (wc *ControllerConfig) GetWorkspaceIdleTimeout() string { + return wc.GetPropertyOrDefault(devworkspaceIdleTimeout, defaultDevWorkspaceIdleTimeout) +} + +func (wc *ControllerConfig) GetWorkspaceControllerSA() (string, error) { + saName := os.Getenv(constants.ControllerServiceAccountNameEnvVar) + if saName == "" { + return "", fmt.Errorf("could not get service account name") + } + return saName, nil +} + +func updateConfigMap(client client.Client, meta metav1.Object, obj runtime.Object) { + if meta.GetNamespace() != 
ConfigMapReference.Namespace || + meta.GetName() != ConfigMapReference.Name { + return + } + if cm, isConfigMap := obj.(*corev1.ConfigMap); isConfigMap { + ControllerCfg.update(cm) + return + } + + configMap := &corev1.ConfigMap{} + err := client.Get(context.TODO(), ConfigMapReference, configMap) + if err != nil { + log.Error(err, fmt.Sprintf("Cannot find the '%s' ConfigMap in namespace '%s'", ConfigMapReference.Name, ConfigMapReference.Namespace)) + } + ControllerCfg.update(configMap) +} + +func WatchControllerConfig(mgr manager.Manager) error { + customConfig := false + configMapName, found := os.LookupEnv(ConfigMapNameEnvVar) + if found && len(configMapName) > 0 { + ConfigMapReference.Name = configMapName + customConfig = true + } + configMapNamespace, found := os.LookupEnv(ConfigMapNamespaceEnvVar) + if found && len(configMapNamespace) > 0 { + ConfigMapReference.Namespace = configMapNamespace + customConfig = true + } + + if ConfigMapReference.Namespace == "" { + return fmt.Errorf("you should set the namespace of the controller config map through the '%s' environment variable", ConfigMapNamespaceEnvVar) + } + + configMap := &corev1.ConfigMap{} + nonCachedClient, err := client.New(mgr.GetConfig(), client.Options{ + Scheme: mgr.GetScheme(), + }) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Searching for config map '%s' in namespace '%s'", ConfigMapReference.Name, ConfigMapReference.Namespace)) + err = nonCachedClient.Get(context.TODO(), ConfigMapReference, configMap) + if err != nil { + if !k8sErrors.IsNotFound(err) { + return err + } + if customConfig { + return fmt.Errorf("cannot find the '%s' ConfigMap in namespace '%s'", ConfigMapReference.Name, ConfigMapReference.Namespace) + } + + buildDefaultConfigMap(configMap) + + err = nonCachedClient.Create(context.TODO(), configMap) + if err != nil { + return err + } + log.Info(fmt.Sprintf(" => created config map '%s' in namespace '%s'", configMap.GetObjectMeta().GetName(), 
configMap.GetObjectMeta().GetNamespace())) + } else { + log.Info(fmt.Sprintf(" => found config map '%s' in namespace '%s'", configMap.GetObjectMeta().GetName(), configMap.GetObjectMeta().GetNamespace())) + } + + if configMap.Data == nil { + configMap.Data = map[string]string{} + } + err = fillOpenShiftRouteSuffixIfNecessary(nonCachedClient, configMap) + if err != nil { + return err + } + + updateConfigMap(nonCachedClient, configMap.GetObjectMeta(), configMap) + + // TODO: Workaround since we don't have a controller here; we should remove configmap and use + // env vars instead. + //var emptyMapper handler.ToRequestsFunc = func(obj handler.MapObject) []reconcile.Request { + // return []reconcile.Request{} + //} + //err = ctr.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestsFromMapFunc{ + // ToRequests: emptyMapper, + //}, predicate.Funcs{ + // UpdateFunc: func(evt event.UpdateEvent) bool { + // updateConfigMap(mgr.GetClient(), evt.MetaNew, evt.ObjectNew) + // return false + // }, + // CreateFunc: func(evt event.CreateEvent) bool { + // updateConfigMap(mgr.GetClient(), evt.Meta, evt.Object) + // return false + // }, + // DeleteFunc: func(evt event.DeleteEvent) bool { + // return false + // }, + // GenericFunc: func(evt event.GenericEvent) bool { + // return false + // }, + //}) + + return err +} + +func SetupConfigForTesting(cm *corev1.ConfigMap) { + ControllerCfg.update(cm) +} + +func buildDefaultConfigMap(cm *corev1.ConfigMap) { + cm.Name = ConfigMapReference.Name + cm.Namespace = ConfigMapReference.Namespace + cm.Labels = constants.ControllerAppLabels() + + cm.Data = map[string]string{} +} + +func fillOpenShiftRouteSuffixIfNecessary(nonCachedClient client.Client, configMap *corev1.ConfigMap) error { + if !infrastructure.IsOpenShift() { + return nil + } + + testRoute := &routeV1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: configMap.Namespace, + Name: "devworkspace-controller-test-route", + }, + Spec: routeV1.RouteSpec{ + To: 
routeV1.RouteTargetReference{ + Kind: "Service", + Name: "devworkspace-controller-test-route", + }, + }, + } + + err := nonCachedClient.Create(context.TODO(), testRoute) + if err != nil { + return err + } + defer nonCachedClient.Delete(context.TODO(), testRoute) + host := testRoute.Spec.Host + if host != "" { + prefixToRemove := "devworkspace-controller-test-route-" + configMap.Namespace + "." + configMap.Data[RoutingSuffix] = strings.TrimPrefix(host, prefixToRemove) + } + + err = nonCachedClient.Update(context.TODO(), configMap) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/devfile/devworkspace-operator/pkg/config/doc.go b/vendor/github.com/devfile/devworkspace-operator/pkg/config/doc.go new file mode 100644 index 000000000..4e8764998 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/pkg/config/doc.go @@ -0,0 +1,23 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +// Package config is used by components to get configuration. +// +// Typically each configuration property has the default value. +// Default value is supposed to be overridden via config map. +// +// There is the following configuration names convention: +// - words are lower-cased +// - . is used to separate subcomponents +// - _ is used to separate words in the component name +// +package config diff --git a/vendor/github.com/devfile/devworkspace-operator/pkg/config/env.go b/vendor/github.com/devfile/devworkspace-operator/pkg/config/env.go new file mode 100644 index 000000000..9f104f447 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/pkg/config/env.go @@ -0,0 +1,70 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. 
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package config + +import ( + "fmt" + "os" + "strconv" + + "k8s.io/apimachinery/pkg/api/resource" +) + +type ControllerEnv struct{} + +const ( + webhooksSecretNameEnvVar = "WEBHOOK_SECRET_NAME" + developmentModeEnvVar = "DEVELOPMENT_MODE" + maxConcurrentReconciles = "MAX_CONCURRENT_RECONCILES" + + WebhooksMemLimitEnvVar = "WEBHOOKS_SERVER_MEMORY_LIMIT" + WebhooksMemRequestEnvVar = "WEBHOOKS_SERVER_MEMORY_REQUEST" + WebhooksCPULimitEnvVar = "WEBHOOKS_SERVER_CPU_LIMIT" + WebhooksCPURequestEnvVar = "WEBHOOKS_SERVER_CPU_REQUEST" +) + +func GetWebhooksSecretName() (string, error) { + env := os.Getenv(webhooksSecretNameEnvVar) + if env == "" { + return "", fmt.Errorf("environment variable %s is unset", webhooksSecretNameEnvVar) + } + return env, nil +} + +func GetDevModeEnabled() bool { + return os.Getenv(developmentModeEnvVar) == "true" +} + +func GetMaxConcurrentReconciles() (int, error) { + env := os.Getenv(maxConcurrentReconciles) + if env == "" { + return 0, fmt.Errorf("environment variable %s is unset", maxConcurrentReconciles) + } + val, err := strconv.Atoi(env) + if err != nil { + return 0, fmt.Errorf("could not parse environment variable %s: %s", maxConcurrentReconciles, err) + } + return val, nil +} + +func GetResourceQuantityFromEnvVar(env string) (*resource.Quantity, error) { + val := os.Getenv(env) + if val == "" { + return nil, fmt.Errorf("environment variable %s is unset", env) + } + quantity, err := resource.ParseQuantity(val) + if err != nil { + return nil, fmt.Errorf("failed to parse environment variable %s: %s", env, err) + } + return &quantity, nil +} diff --git a/vendor/github.com/devfile/devworkspace-operator/pkg/config/property.go 
// --- vendor/github.com/devfile/devworkspace-operator/pkg/config/property.go ---
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
//   Red Hat, Inc. - initial API and implementation
//

package config

const (
	// sidecarPullPolicy is the image pull policy that is applied to every container within workspace
	sidecarPullPolicy        = "devworkspace.sidecar.image_pull_policy"
	defaultSidecarPullPolicy = "Always"

	// workspacePVCName config property handles the PVC name that should be created and used for all workspaces within one kubernetes namespace
	workspacePVCName        = "devworkspace.pvc.name"
	defaultWorkspacePVCName = "claim-devworkspace"

	// workspacePVCStorageClassName optionally overrides the storage class of the workspace PVC; no default (nil when unset).
	workspacePVCStorageClassName = "devworkspace.pvc.storage_class.name"

	// routingClass defines the default routing class that should be used if user does not specify it explicitly
	routingClass        = "devworkspace.default_routing_class"
	defaultRoutingClass = "basic"

	// RoutingSuffix is the base domain for routes/ingresses created on the cluster. All
	// routes/ingresses will be created with URL http(s)://<unique-part>.<RoutingSuffix>.
	// It is supposed to be used by embedded routing solvers only.
	RoutingSuffix = "devworkspace.routing.cluster_host_suffix"

	// experimentalFeaturesEnabled toggles features that are not production-ready;
	// see ControllerConfig.GetExperimentalFeaturesEnabled.
	experimentalFeaturesEnabled        = "devworkspace.experimental_features_enabled"
	defaultExperimentalFeaturesEnabled = "false"

	// devworkspaceIdleTimeout is the inactivity period (Go duration string) after which
	// a workspace is considered idle.
	devworkspaceIdleTimeout        = "devworkspace.idle_timeout"
	defaultDevWorkspaceIdleTimeout = "15m"

	// tlsInsecureSkipVerify enables Skip Verify for TLS connections.
	// It's insecure and should be used only for testing.
	tlsInsecureSkipVerify        = "tls.insecure_skip_verify"
	defaultTlsInsecureSkipVerify = "false"
)

// --- vendor/github.com/devfile/devworkspace-operator/pkg/constants/attributes.go ---
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
//   Red Hat, Inc. - initial API and implementation
//

package constants

// Constants that are used in attributes on DevWorkspace elements (components, endpoints, etc.)
const (
	// PluginSourceAttribute is an attribute added to components, commands, and projects in a flattened
	// DevWorkspace representation to signify where the respective component came from (i.e. which plugin
	// or parent imported it)
	PluginSourceAttribute = "controller.devfile.io/imported-by"

	// EndpointURLAttribute is an attribute added to endpoints to denote the endpoint on the cluster that
	// was created to route to this endpoint
	EndpointURLAttribute = "controller.devfile.io/endpoint-url"
)

// --- vendor/github.com/devfile/devworkspace-operator/pkg/constants/constants.go ---
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
//   Red Hat, Inc. - initial API and implementation
//

// package constants defines constant values used throughout the DevWorkspace Operator
package constants

// ControllerAppLabels returns the labels which should be used for controller related objects.
var ControllerAppLabels = func() map[string]string {
	return map[string]string{
		"app.kubernetes.io/name":    "devworkspace-controller",
		"app.kubernetes.io/part-of": "devworkspace-operator",
	}
}

// Internal constants
const (
	// DefaultProjectsSourcesRoot is the default container path for project sources
	// (matches the "/projects" SourceMapping used elsewhere in the operator).
	DefaultProjectsSourcesRoot = "/projects"

	// ServiceAccount is the "devworkspace" service account name — presumably the one
	// used for workspace pods; confirm at usage sites.
	ServiceAccount = "devworkspace"

	// Default memory limits/requests applied to workspace sidecar containers.
	SidecarDefaultMemoryLimit   = "128M"
	SidecarDefaultMemoryRequest = "64M"

	SidecarDefaultCpuLimit   = "" // do not provide any value
	SidecarDefaultCpuRequest = "" // do not provide any value

	// PVCStorageSize is the storage size requested for the workspace PVC.
	PVCStorageSize = "1Gi"

	// DevWorkspaceIDLoggerKey is the key used to log workspace ID in the reconcile
	DevWorkspaceIDLoggerKey = "devworkspace_id"

	// ControllerServiceAccountNameEnvVar stores the name of the serviceaccount used in the controller.
	ControllerServiceAccountNameEnvVar = "CONTROLLER_SERVICE_ACCOUNT_NAME"

	// PVCCleanupPodMemoryLimit is the memory limit used for PVC clean up pods
	PVCCleanupPodMemoryLimit = "100Mi"

	// PVCCleanupPodMemoryRequest is the memory request used for PVC clean up pods
	PVCCleanupPodMemoryRequest = "32Mi"

	// PVCCleanupPodCPULimit is the cpu limit used for PVC clean up pods
	PVCCleanupPodCPULimit = "50m"

	// PVCCleanupPodCPURequest is the cpu request used for PVC clean up pods
	PVCCleanupPodCPURequest = "5m"

	// Resource limits/requests for project cloner init container
	ProjectCloneMemoryLimit   = "1Gi"
	ProjectCloneMemoryRequest = "128Mi"
	ProjectCloneCPULimit      = "1000m"
	ProjectCloneCPURequest    = "100m"

	// Constants describing storage classes supported by the controller

	// CommonStorageClassType defines the 'common' storage policy -- one PVC is provisioned per namespace and all devworkspace storage
	// is mounted in it on subpaths according to devworkspace ID.
	CommonStorageClassType = "common"

	// AsyncStorageClassType defines the 'asynchronous' storage policy. An rsync sidecar is added to devworkspaces that uses SSH to connect
	// to a storage deployment that mounts a common PVC for the namespace.
	AsyncStorageClassType = "async"

	// EphemeralStorageClassType defines the 'ephemeral' storage policy: all volumes are allocated as emptyDir volumes and
	// so do not require cleanup. When a DevWorkspace is stopped, all local changes are lost.
	EphemeralStorageClassType = "ephemeral"
)
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation +// + +package constants + +// Constants that are used in labels and annotation on DevWorkspace-related resources. +const ( + // DevWorkspaceIDLabel is the label key to store workspace identifier + DevWorkspaceIDLabel = "controller.devfile.io/devworkspace_id" + + // DevWorkspaceCreatorLabel is the label key for storing the UID of the user who created the workspace + DevWorkspaceCreatorLabel = "controller.devfile.io/creator" + + // DevWorkspaceNameLabel is the label key to store workspace name + DevWorkspaceNameLabel = "controller.devfile.io/devworkspace_name" + + // DevWorkspaceMountLabel is the label key to store if a configmap or secret should be mounted to the devworkspace + DevWorkspaceMountLabel = "controller.devfile.io/mount-to-devworkspace" + + // DevWorkspaceMountPathAnnotation is the annotation key to store the mount path for the secret or configmap. + // If no mount path is provided, configmaps will be mounted at /etc/config/ and secrets will + // be mounted at /etc/secret/ + DevWorkspaceMountPathAnnotation = "controller.devfile.io/mount-path" + + // DevWorkspaceMountAsAnnotation is the annotation key to configure the way how configmaps or secrets should be mounted. + // Supported options: + // - "env" - mount as environment variables + // - "file" - mount as a file + // If mountAs is not provided, the default behaviour will be to mount as a file. + DevWorkspaceMountAsAnnotation = "controller.devfile.io/mount-as" + + // DevWorkspaceRestrictedAccessAnnotation marks the intention that devworkspace access is restricted to only the creator; setting this + // annotation will cause devworkspace start to fail if webhooks are disabled. 
+ // Operator also propagates it to the devworkspace-related objects to perform authorization. + DevWorkspaceRestrictedAccessAnnotation = "controller.devfile.io/restricted-access" + + // DevWorkspaceStopReasonAnnotation marks the reason why the devworkspace was stopped; when a devworkspace is restarted + // this annotation will be cleared + DevWorkspaceStopReasonAnnotation = "controller.devfile.io/stopped-by" + + // DevWorkspaceDebugStartAnnotation enables debugging workspace startup if set to "true". If a workspace with this annotation + // fails to start (i.e. enters the "Failed" phase), its deployment will not be scaled down in order to allow viewing logs, etc. + DevWorkspaceDebugStartAnnotation = "controller.devfile.io/debug-start" + + // WebhookRestartedAtAnnotation holds the time (unixnano) of when the webhook server was forced to restart by controller + WebhookRestartedAtAnnotation = "controller.devfile.io/restarted-at" + + // RoutingAnnotationInfix is the infix of the annotations of DevWorkspace that are passed down as annotation to the DevWorkspaceRouting objects. + // The full annotation name is supposed to be ".routing.controller.devfile.io/" + RoutingAnnotationInfix = ".routing.controller.devfile.io/" + + // DevWorkspaceStorageTypeAtrr defines the strategy used for provisioning storage for the workspace. + // If empty, the common PVC strategy is used. + // Supported options: + // - "common": Create one PVC per namespace, and store data for all workspaces in that namespace in that PVC + // - "async" : Create one PVC per namespace, and create a remote server that syncs data from workspaces to the PVC. 
+ // All volumeMounts used for devworkspaces are emptyDir + DevWorkspaceStorageTypeAtrr = "controller.devfile.io/storage-type" + + // DevWorkspaceEndpointNameAnnotation is the annotation key for storing an endpoint's name from the devfile representation + DevWorkspaceEndpointNameAnnotation = "controller.devfile.io/endpoint_name" + + // DevWorkspaceDiscoverableServiceAnnotation marks a service in a devworkspace as created for a discoverable endpoint, + // as opposed to a service created to support the devworkspace itself. + DevWorkspaceDiscoverableServiceAnnotation = "controller.devfile.io/discoverable-service" + + // DevWorkspacePullSecretLabel marks the intention that secret should be used as pull secret for devworkspaces within namespace + // Only secrets with 'true' value will be mounted as pull secret + // Should be assigned to secrets with type docker config types (kubernetes.io/dockercfg and kubernetes.io/dockerconfigjson) + DevWorkspacePullSecretLabel = "controller.devfile.io/devworkspace_pullsecret" + + // NamespacedConfigLabelKey is a label applied to configmaps to mark them as a configuration for all DevWorkspaces in + // the current namespace. + NamespacedConfigLabelKey = "controller.devfile.io/namespaced-config" +) diff --git a/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/cluster.go b/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/cluster.go new file mode 100644 index 000000000..1c42f2871 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/cluster.go @@ -0,0 +1,108 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +package infrastructure + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +// Type specifies what kind of infrastructure we're operating in. +type Type int + +const ( + // Unsupported represents an Unsupported cluster version (e.g. OpenShift v3) + Unsupported Type = iota + Kubernetes + OpenShiftv4 +) + +var ( + // current is the infrastructure that we're currently running on. + current Type + initialized = false +) + +// Initialize attempts to determine the type of cluster it's currently running on (OpenShift or Kubernetes). This function +// *must* be called before others; otherwise the call will panic. +func Initialize() error { + var err error + current, err = detect() + if err != nil { + return err + } + if current == Unsupported { + return fmt.Errorf("running on unsupported cluster") + } + initialized = true + return nil +} + +// InitializeForTesting is used to mock running on a specific type of cluster (Kubernetes, OpenShift) in testing code. +func InitializeForTesting(currentInfrastructure Type) { + current = currentInfrastructure + initialized = true +} + +// IsOpenShift returns true if the current cluster is an OpenShift (v4.x) cluster. 
+func IsOpenShift() bool { + if !initialized { + panic("Attempting to determine information about the cluster without initializing first") + } + return current == OpenShiftv4 +} + +func detect() (Type, error) { + kubeCfg, err := config.GetConfig() + if err != nil { + return Unsupported, fmt.Errorf("could not get kube config: %w", err) + } + discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeCfg) + if err != nil { + return Unsupported, fmt.Errorf("could not get discovery client: %w", err) + } + apiList, err := discoveryClient.ServerGroups() + if err != nil { + return Unsupported, fmt.Errorf("could not read API groups: %w", err) + } + if findAPIGroup(apiList.Groups, "route.openshift.io") == nil { + return Kubernetes, nil + } else { + if findAPIGroup(apiList.Groups, "config.openshift.io") == nil { + return Unsupported, nil + } else { + return OpenShiftv4, nil + } + } +} + +func findAPIGroup(source []metav1.APIGroup, apiName string) *metav1.APIGroup { + for i := 0; i < len(source); i++ { + if source[i].Name == apiName { + return &source[i] + } + } + return nil +} + +func findAPIResources(source []*metav1.APIResourceList, groupName string) []metav1.APIResource { + for i := 0; i < len(source); i++ { + if source[i].GroupVersion == groupName { + return source[i].APIResources + } + } + return nil +} diff --git a/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/namespace.go b/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/namespace.go new file mode 100644 index 000000000..01a3a5c46 --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/namespace.go @@ -0,0 +1,50 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +package infrastructure + +import ( + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + WatchNamespaceEnvVar = "WATCH_NAMESPACE" +) + +// GetOperatorNamespace returns the namespace the operator should be running in. +// +// This function was ported over from Operator SDK 0.17.0 and modified. +func GetOperatorNamespace() (string, error) { + nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + if os.IsNotExist(err) { + return "", fmt.Errorf("could not read namespace from mounted serviceaccount info") + } + return "", err + } + ns := strings.TrimSpace(string(nsBytes)) + return ns, nil +} + +// GetWatchNamespace returns the namespace the operator should be watching for changes +// +// This function was ported over from Operator SDK 0.17.0 +func GetWatchNamespace() (string, error) { + ns, found := os.LookupEnv(WatchNamespaceEnvVar) + if !found { + return "", fmt.Errorf("%s must be set", WatchNamespaceEnvVar) + } + return ns, nil +} diff --git a/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/webhook.go b/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/webhook.go new file mode 100644 index 000000000..778a54cef --- /dev/null +++ b/vendor/github.com/devfile/devworkspace-operator/pkg/infrastructure/webhook.go @@ -0,0 +1,52 @@ +// +// Copyright (c) 2019-2021 Red Hat, Inc. +// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. 
- initial API and implementation +// + +package infrastructure + +import ( + "k8s.io/client-go/discovery" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +// IsWebhookConfigurationEnabled returns true if both of mutating and validating webhook configurations are enabled +func IsWebhookConfigurationEnabled() (bool, error) { + kubeCfg, err := config.GetConfig() + if err != nil { + return false, err + } + discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeCfg) + if err != nil { + return false, err + } + _, apiResources, err := discoveryClient.ServerGroupsAndResources() + if err != nil { + return false, err + } + + if admissionRegistrationResources := findAPIResources(apiResources, "admissionregistration.k8s.io/v1beta1"); admissionRegistrationResources != nil { + isMutatingHookAvailable := false + isValidatingMutatingHookAvailable := false + for i := range admissionRegistrationResources { + if admissionRegistrationResources[i].Name == "mutatingwebhookconfigurations" { + isMutatingHookAvailable = true + } + + if admissionRegistrationResources[i].Name == "validatingwebhookconfigurations" { + isValidatingMutatingHookAvailable = true + } + } + + return isMutatingHookAvailable && isValidatingMutatingHookAvailable, nil + } + + return false, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f61aaabab..a21029ea8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -33,6 +33,22 @@ github.com/che-incubator/kubernetes-image-puller-operator/pkg/apis github.com/che-incubator/kubernetes-image-puller-operator/pkg/apis/che/v1alpha1 # github.com/davecgh/go-spew v1.1.1 => github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew +# github.com/devfile/api/v2 v2.0.0-20210713124824-03e023e7078b +## explicit +github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2 +github.com/devfile/api/v2/pkg/attributes +github.com/devfile/api/v2/pkg/devfile +# github.com/devfile/devworkspace-operator v0.2.1-0.20210805190010-9c55f69c461d +## explicit 
+github.com/devfile/devworkspace-operator/apis/controller/v1alpha1 +github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting +github.com/devfile/devworkspace-operator/controllers/controller/devworkspacerouting/solvers +github.com/devfile/devworkspace-operator/internal/images +github.com/devfile/devworkspace-operator/internal/map +github.com/devfile/devworkspace-operator/pkg/common +github.com/devfile/devworkspace-operator/pkg/config +github.com/devfile/devworkspace-operator/pkg/constants +github.com/devfile/devworkspace-operator/pkg/infrastructure # github.com/dgrijalva/jwt-go v3.2.0+incompatible => github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgrijalva/jwt-go # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 @@ -45,7 +61,7 @@ github.com/fsnotify/fsnotify # github.com/go-logr/logr v0.4.0 => github.com/go-logr/logr v0.3.0 ## explicit github.com/go-logr/logr -# github.com/go-logr/zapr v0.1.1 => github.com/go-logr/zapr v0.3.0 +# github.com/go-logr/zapr v0.3.0 => github.com/go-logr/zapr v0.3.0 github.com/go-logr/zapr # github.com/gogo/protobuf v1.3.2 => github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf/proto @@ -69,7 +85,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/gofuzz v1.1.0 => github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 +# github.com/google/gofuzz v1.2.0 => github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 github.com/google/gofuzz # github.com/google/uuid v1.1.2 => github.com/google/uuid v1.1.2 github.com/google/uuid @@ -148,7 +164,7 @@ go.uber.org/atomic go.uber.org/multierr # go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee => go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee go.uber.org/tools/update-license -# go.uber.org/zap v1.13.0 => go.uber.org/zap v1.13.0 +# go.uber.org/zap v1.16.0 => go.uber.org/zap 
v1.13.0 ## explicit go.uber.org/zap go.uber.org/zap/buffer