diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0cf8703bf..85f2e5755 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -9,7 +9,7 @@ concurrency:
 jobs:
   verify:
     name: verify
-    runs-on: self-hosted
+    runs-on: [self-hosted, ecs]
    if: ${{ always() }}
     env:
       #GOPATH: ${{ github.workspace }}
@@ -38,7 +38,7 @@ jobs:
   build:
     name: build
     needs: verify
-    runs-on: self-hosted
+    runs-on: [self-hosted, ecs]
     steps:
       - name: Checkout code
        uses: actions/checkout@v3
@@ -54,7 +54,7 @@ jobs:
   test:
     name: Unit test
     needs: build
-    runs-on: self-hosted
+    runs-on: [self-hosted, ecs]
     steps:
       - name: Checkout code
        uses: actions/checkout@v3
@@ -68,7 +68,14 @@ jobs:
     needs: build
     env:
       GOPROXY: "https://goproxy.cn,direct"
-    runs-on: self-hosted
+    runs-on: [self-hosted, ecs]
+    strategy:
+      fail-fast: false
+      matrix:
+        # Support the latest three minor releases of Kubernetes, which roughly tracks
+        # the upstream End of Life policy: https://kubernetes.io/releases/
+        # Remember to update the CI Schedule Workflow when adding a new version.
+        k8s: [ v1.25.3, v1.28.0, v1.29.0 ]
     steps:
       # Free up disk space on Ubuntu
       - name: Free Disk Space (Ubuntu)
@@ -92,12 +99,20 @@ jobs:
        with:
          go-version: '1.20'
       - name: Prepare e2e env
-        run: ./hack/prepare-e2e.sh
+        env:
+          KIND_IMAGE: kindest/node:${{ matrix.k8s }}
+        run: hack/local-up-kosmos.sh
       - name: Run e2e test
-        run: ./hack/rune2e.sh
+        env:
+          ARTIFACTS_PATH: ${{ github.workspace }}/e2e-test/${{ matrix.k8s }}/
+        run: hack/rune2e.sh
       - name: Upload logs
        uses: actions/upload-artifact@v3
-        if: failure()
+        if: always()
        with:
-          name: kosmos-e2e-logs-${{ github.run_id }}
-          path: ${{ github.workspace }}/e2e-test/logs-*
\ No newline at end of file
+          name: kosmos-e2e-logs-${{ github.run_id }}_${{ matrix.k8s }}
+          path: ${{ github.workspace }}/e2e-test/${{ matrix.k8s }}/
+      - name: upload environments logs
+        if: always()
+        uses: actions/upload-artifact@v3
+        with:
+          name: kosmos_environments_log_${{ matrix.k8s }}
+          path: ${{ github.workspace }}/environments
\ No newline at end of file
diff --git a/examples/nginx-demo.yaml b/examples/nginx-demo.yaml
new file mode 100644
index 000000000..407ab96c8
--- /dev/null
+++ b/examples/nginx-demo.yaml
@@ -0,0 +1,25 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      tolerations:
+        - key: kosmos.io/node
+          operator: Equal
+          value: "true"
+      containers:
+        - name: nginx
+          image: nginx:1.14.2
+          ports:
+            - containerPort: 80
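The version matrix feeds the hack scripts purely through environment variables. A minimal sketch of how `KIND_IMAGE` and `ARTIFACTS_PATH` are consumed, assuming the fallback defaults the scripts in this PR declare:

```bash
# Sketch of the env contract between ci.yml and the hack scripts; the
# defaults mirror local-up-kosmos.sh and rune2e.sh in this PR.
KIND_IMAGE=${KIND_IMAGE:-"kindest/node:v1.27.2"}
E2E_NAMESPACE="kosmos-e2e"
ARTIFACTS_PATH=${ARTIFACTS_PATH:-"${HOME}/${E2E_NAMESPACE}"}

echo "kind clusters will boot from: ${KIND_IMAGE}"
echo "e2e logs will be collected under: ${ARTIFACTS_PATH}"
```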
diff --git a/hack/cluster.sh b/hack/cluster.sh
index 1a1ab904b..0e72be0f3 100755
--- a/hack/cluster.sh
+++ b/hack/cluster.sh
@@ -7,32 +7,74 @@ set -o pipefail
 HOST_CLUSTER_NAME="cluster-host"
 CURRENT="$(dirname "${BASH_SOURCE[0]}")"
 ROOT=$(dirname "${BASH_SOURCE[0]}")/..
-#KIND_IMAGE="ghcr.io/kosmos-io/node:v1.25.3"
-KIND_IMAGE="kindest/node:v1.27.2"
 # true: when cluster is exist, reuse exist one!
 REUSE=${REUSE:-false}
 #VERSION=${VERSION:-latest}
 VERSION="v0.2.0"
-# default cert and key for node server https
-CERT=$(cat ${ROOT}/pkg/cert/crt.pem | base64 -w 0)
-KEY=$(cat ${ROOT}/pkg/cert/key.pem | base64 -w 0)
-
 CN_ZONE=${CN_ZONE:-false}
 source "$(dirname "${BASH_SOURCE[0]}")/util.sh"
 
+# default cert and key for node server https
+CERT=$(util::get_base64_kubeconfig ${ROOT}/pkg/cert/crt.pem)
+KEY=$(util::get_base64_kubeconfig ${ROOT}/pkg/cert/key.pem)
+
 if [ $REUSE == true ]; then
   echo "!!!!!!!!!!!Warning: Setting REUSE to true will not delete existing clusters.!!!!!!!!!!!"
 fi
 
 source "${ROOT}/hack/util.sh"
 
+# prepare e2e cluster
+function prepare_e2e_cluster() {
+  local -r clustername=$1
+  CLUSTER_DIR="${ROOT}/environments/${clustername}"
+
+  kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "$ROOT"/deploy/crds
+
+  # deploy kosmos-scheduler for e2e test case of mysql-operator
+  sed -e "s|__VERSION__|$VERSION|g" -e "w ${ROOT}/environments/kosmos-scheduler.yml" "$ROOT"/deploy/scheduler/deployment.yaml
+  kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "${ROOT}/environments/kosmos-scheduler.yml"
+  kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "$ROOT"/deploy/scheduler/rbac.yaml
+
+  util::wait_for_condition "kosmos scheduler are ready" \
+    "kubectl --kubeconfig $CLUSTER_DIR/kubeconfig -n kosmos-system get deploy kosmos-scheduler -o jsonpath='{.status.replicas}{\" \"}{.status.readyReplicas}{\"\n\"}' | awk '{if (\$1 == \$2 && \$1 > 0) exit 0; else exit 1}'" \
+    300
+  echo "cluster $clustername deploy kosmos-scheduler success"
+
+  docker exec ${clustername}-control-plane /bin/sh -c "mv /etc/kubernetes/manifests/kube-scheduler.yaml /etc/kubernetes"
+
+  # add the args for e2e test case of mysql-operator
+  kubectl --kubeconfig $CLUSTER_DIR/kubeconfig -n kosmos-system patch deployment clustertree-cluster-manager --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--auto-mcs-prefix=kosmos-e2e"}]'
+
+  util::wait_for_condition "kosmos ${clustername} clustertree are ready" \
+    "kubectl --kubeconfig $CLUSTER_DIR/kubeconfig -n kosmos-system get deploy clustertree-cluster-manager -o jsonpath='{.status.replicas}{\" \"}{.status.readyReplicas}{\"\n\"}' | awk '{if (\$1 == \$2 && \$1 > 0) exit 0; else exit 1}'" \
+    300
+}
+
+# prepare docker image
+function prepare_docker_image() {
+  # pull calico image
+  docker pull calico/apiserver:v3.25.0
+  docker pull calico/cni:v3.25.0
+  docker pull calico/csi:v3.25.0
+  docker pull calico/kube-controllers:v3.25.0
+  docker pull calico/node-driver-registrar:v3.25.0
+  docker pull calico/node:v3.25.0
+  docker pull calico/pod2daemon-flexvol:v3.25.0
+  docker pull calico/typha:v3.25.0
+  docker pull quay.io/tigera/operator:v1.29.0
+}
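Both wait blocks in `prepare_e2e_cluster` retry the same probe through `util::wait_for_condition`; stripped of the escaping, the check that has to succeed is just:

```bash
# Succeeds (exit 0) only when the deployment reports as many ready replicas
# as desired and at least one replica; wait_for_condition retries it for 300s.
kubectl --kubeconfig "$CLUSTER_DIR/kubeconfig" -n kosmos-system \
  get deploy kosmos-scheduler \
  -o jsonpath='{.status.replicas}{" "}{.status.readyReplicas}{"\n"}' \
  | awk '{if ($1 == $2 && $1 > 0) exit 0; else exit 1}'
```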
"s/__HOST_IPADDRESS__/${hostIpAddress}/g" ${CLUSTER_DIR}/kindconfig if [[ "$(kind get clusters | grep -c "${clustername}")" -eq 1 && "${REUSE}" = true ]]; then echo "cluster ${clustername} exist reuse it" else kind delete clusters $clustername || true - kind create cluster --name $clustername --config ${CLUSTER_DIR}/kindconfig --image $KIND_IMAGE + echo "create cluster ${clustername} with kind image ${KIND_IMAGE}" + kind create cluster --name "${clustername}" --config "${CLUSTER_DIR}/kindconfig" --image "${KIND_IMAGE}" fi + # load docker image to kind cluster + kind load docker-image calico/apiserver:v3.25.0 --name $clustername + kind load docker-image calico/cni:v3.25.0 --name $clustername + kind load docker-image calico/csi:v3.25.0 --name $clustername + kind load docker-image calico/kube-controllers:v3.25.0 --name $clustername + kind load docker-image calico/node-driver-registrar:v3.25.0 --name $clustername + kind load docker-image calico/node:v3.25.0 --name $clustername + kind load docker-image calico/pod2daemon-flexvol:v3.25.0 --name $clustername + kind load docker-image calico/typha:v3.25.0 --name $clustername + kind load docker-image quay.io/tigera/operator:v1.29.0 --name $clustername - dockerip=$(docker inspect "${clustername}-control-plane" --format "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}") kubectl taint nodes --all node-role.kubernetes.io/control-plane- || true # prepare external kubeconfig - docker exec ${clustername}-control-plane /bin/sh -c "cat /etc/kubernetes/admin.conf" | sed -e "s|${clustername}-control-plane|$dockerip|g" -e "/certificate-authority-data:/d" -e "5s/^/ insecure-skip-tls-verify: true\n/" -e "w ${CLUSTER_DIR}/kubeconfig" - -# # install calico -# if [ "${CN_ZONE}" == false ]; then -# docker pull quay.io/tigera/operator:v1.29.0 -# docker pull docker.io/calico/cni:v3.25.0 -# docker pull docker.io/calico/typha:v3.25.0 -# docker pull docker.io/calico/pod2daemon-flexvol:v3.25.0 -# docker pull docker.io/calico/kube-controllers:v3.25.0 -# docker pull docker.io/calico/node:v3.25.0 -# docker pull docker.io/calico/csi:v3.25.0 -# docker pull docker.io/percona:5.7 -# docker pull docker.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# docker pull docker.io/library/nginx:latest -# docker pull docker.io/library/busybox:latest -# docker pull docker.io/prom/mysqld-exporter:v0.13.0 -# else -# docker pull quay.m.daocloud.io/tigera/operator:v1.29.0 -# docker pull docker.m.daocloud.io/calico/cni:v3.25.0 -# docker pull docker.m.daocloud.io/calico/typha:v3.25.0 -# docker pull docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0 -# docker pull docker.m.daocloud.io/calico/kube-controllers:v3.25.0 -# docker pull docker.m.daocloud.io/calico/node:v3.25.0 -# docker pull docker.m.daocloud.io/calico/csi:v3.25.0 -# docker pull docker.m.daocloud.io/percona:5.7 -# docker pull docker.m.daocloud.io/library/nginx:latest -# docker pull docker.m.daocloud.io/library/busybox:latest -# docker pull docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 -# docker pull docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# -# docker tag quay.m.daocloud.io/tigera/operator:v1.29.0 quay.io/tigera/operator:v1.29.0 -# docker tag docker.m.daocloud.io/calico/cni:v3.25.0 docker.io/calico/cni:v3.25.0 -# docker tag docker.m.daocloud.io/calico/typha:v3.25.0 docker.io/calico/typha:v3.25.0 -# docker tag docker.m.daocloud.io/calico/pod2daemon-flexvol:v3.25.0 docker.io/calico/pod2daemon-flexvol:v3.25.0 -# docker tag docker.m.daocloud.io/calico/kube-controllers:v3.25.0 
docker.io/calico/kube-controllers:v3.25.0 -# docker tag docker.m.daocloud.io/calico/node:v3.25.0 docker.io/calico/node:v3.25.0 -# docker tag docker.m.daocloud.io/calico/csi:v3.25.0 docker.io/calico/csi:v3.25.0 -# docker tag docker.m.daocloud.io/percona:5.7 docker.io/percona:5.7 -# docker tag docker.m.daocloud.io/library/nginx:latest docker.io/library/nginx:latest -# docker tag docker.m.daocloud.io/library/busybox:latest docker.io/library/busybox:latest -# docker tag docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 docker.io/prom/mysqld-exporter:v0.13.0 -# docker tag docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 docker.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# fi -# -# kind load docker-image -n "$clustername" quay.io/tigera/operator:v1.29.0 -# kind load docker-image -n "$clustername" docker.io/calico/cni:v3.25.0 -# kind load docker-image -n "$clustername" docker.io/calico/typha:v3.25.0 -# kind load docker-image -n "$clustername" docker.io/calico/pod2daemon-flexvol:v3.25.0 -# kind load docker-image -n "$clustername" docker.io/calico/kube-controllers:v3.25.0 -# kind load docker-image -n "$clustername" docker.io/calico/node:v3.25.0 -# kind load docker-image -n "$clustername" docker.io/calico/csi:v3.25.0 -# kind load docker-image -n "$clustername" docker.io/percona:5.7 -# kind load docker-image -n "$clustername" docker.io/library/nginx:latest -# kind load docker-image -n "$clustername" docker.io/library/busybox:latest -# kind load docker-image -n "$clustername" docker.io/prom/mysqld-exporter:v0.13.0 -# kind load docker-image -n "$clustername" docker.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# -# if "${clustername}" == $HOST_CLUSTER_NAME; then -# if [ "${CN_ZONE}" == false ]; then -# docker pull docker.io/bitpoke/mysql-operator-orchestrator:v0.6.3 -# docker pull docker.io/prom/mysqld-exporter:v0.13.0 -# docker pull docker.io/bitpoke/mysql-operator-sidecar-8.0:v0.6.3 -# docker pull docker.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# docker pull docker.io/bitpoke/mysql-operator:v0.6.3 -# else -# docker pull docker.m.daocloud.io/bitpoke/mysql-operator-orchestrator:v0.6.3 -# docker pull docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 -# docker pull docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-8.0:v0.6.3 -# docker pull docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# docker pull docker.m.daocloud.io/bitpoke/mysql-operator:v0.6.3 -# -# docker tag docker.m.daocloud.io/bitpoke/mysql-operator-orchestrator:v0.6.3 docker.io/bitpoke/mysql-operator-orchestrator:v0.6.3 -# docker tag docker.m.daocloud.io/prom/mysqld-exporter:v0.13.0 docker.io/prom/mysqld-exporter:v0.13.0 -# docker tag docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-8.0:v0.6.3 docker.io/bitpoke/mysql-operator-sidecar-8.0:v0.6.3 -# docker tag docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 docker.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# docker tag docker.m.daocloud.io/bitpoke/mysql-operator:v0.6.3 docker.io/bitpoke/mysql-operator:v0.6.3 -# fi -# kind load docker-image -n "$clustername" docker.io/bitpoke/mysql-operator-orchestrator:v0.6.3 -# kind load docker-image -n "$clustername" docker.io/prom/mysqld-exporter:v0.13.0 -# kind load docker-image -n "$clustername" docker.io/bitpoke/mysql-operator-sidecar-8.0:v0.6.3 -# kind load docker-image -n "$clustername" docker.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.3 -# kind load docker-image -n "$clustername" docker.io/bitpoke/mysql-operator:v0.6.3 -# fi - kubectl --context="kind-${clustername}" create -f 
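The commented-out block deleted above encoded a pull-through-mirror-and-retag workaround for CN_ZONE environments. Condensed into a hypothetical helper (the image name is only an example), the pattern it implemented was:

```bash
# Hypothetical condensation of the removed CN_ZONE logic: pull via the
# daocloud mirror when docker.io is unreachable or slow, then retag to the
# canonical name so manifests resolve unchanged.
function pull_with_mirror() {
  local image=$1
  if [ "${CN_ZONE}" == false ]; then
    docker pull "docker.io/${image}"
  else
    docker pull "docker.m.daocloud.io/${image}"
    docker tag "docker.m.daocloud.io/${image}" "docker.io/${image}"
  fi
}

pull_with_mirror "calico/cni:v3.25.0"
```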
"$CURRENT/calicooperator/tigera-operator.yaml" || $("${REUSE}" -eq "true") + kind get kubeconfig --name "${clustername}" > "${CLUSTER_DIR}/kubeconfig" + dockerip=$(docker inspect "${clustername}-control-plane" --format "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}") + echo "get docker ip from pod $dockerip" + docker exec ${clustername}-control-plane /bin/sh -c "cat /etc/kubernetes/admin.conf" | sed -e "s|${clustername}-control-plane|$dockerip|g" -e "/certificate-authority-data:/d" -e "5s/^/ insecure-skip-tls-verify: true\n/" -e "w ${CLUSTER_DIR}/kubeconfig-nodeIp" + + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig create -f "$CURRENT/calicooperator/tigera-operator.yaml" || $("${REUSE}" -eq "true") kind export kubeconfig --name "$clustername" util::wait_for_crd installations.operator.tigera.io - kubectl --context="kind-${clustername}" apply -f "${CLUSTER_DIR}"/calicoconfig + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "${CLUSTER_DIR}"/calicoconfig echo "create cluster ${clustername} success" echo "wait all node ready" # N = nodeNum + 1 @@ -168,7 +143,8 @@ function join_cluster() { local host_cluster=$1 local member_cluster=$2 local kubeconfig_path="${ROOT}/environments/${member_cluster}/kubeconfig" - local base64_kubeconfig=$(base64 -w 0 <"$kubeconfig_path") + local hostConfig_path="${ROOT}/environments/${host_cluster}/kubeconfig" + local base64_kubeconfig=$(util::get_base64_kubeconfig <"$kubeconfig_path") echo " base64 kubeconfig successfully converted: $base64_kubeconfig " local common_metadata="" @@ -177,7 +153,7 @@ function join_cluster() { kosmos.io/cluster-role: root" fi - cat < 0) exit 0; else exit 1}'" \ - 300 - echo "cluster $clustername deploy kosmos-scheduler success" - - docker exec ${clustername}-control-plane /bin/sh -c "mv /etc/kubernetes/manifests/kube-scheduler.yaml /etc/kubernetes" - - # add the args for e2e test case of mysql-operator - kubectl --context="kind-${clustername}" -n kosmos-system patch deployment clustertree-cluster-manager --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--auto-mcs-prefix=kosmos-e2e"}]' - - util::wait_for_condition "kosmos clustertree are ready" \ - "kubectl --context="kind-${clustername}" -n kosmos-system get deploy clustertree-cluster-manager -o jsonpath='{.status.replicas}{\" \"}{.status.readyReplicas}{\"\n\"}' | awk '{if (\$1 == \$2 && \$1 > 0) exit 0; else exit 1}'" \ + util::wait_for_condition "kosmos ${clustername} clustertree are ready" \ + "kubectl --kubeconfig $CLUSTER_DIR/kubeconfig -n kosmos-system get deploy clustertree-cluster-manager -o jsonpath='{.status.replicas}{\" \"}{.status.readyReplicas}{\"\n\"}' | awk '{if (\$1 == \$2 && \$1 > 0) exit 0; else exit 1}'" \ 300 } function deploy_cluster() { local -r clustername=$1 - kubectl config use-context "kind-${clustername}" + CLUSTER_DIR="${ROOT}/environments/${clustername}" + load_cluster_images "$clustername" - kubectl --context="kind-${clustername}" apply -f "$ROOT"/deploy/clusterlink-namespace.yml - kubectl --context="kind-${clustername}" apply -f "$ROOT"/deploy/kosmos-rbac.yml - kubectl --context="kind-${clustername}" apply -f "$ROOT"/deploy/crds + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "$ROOT"/deploy/clusterlink-namespace.yml + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "$ROOT"/deploy/kosmos-rbac.yml + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "$ROOT"/deploy/crds util::wait_for_crd clusternodes.kosmos.io clusters.kosmos.io clusterdistributionpolicies.kosmos.io 
distributionpolicies.kosmos.io sed -e "s|__VERSION__|$VERSION|g" -e "w ${ROOT}/environments/clusterlink-network-manager.yml" "$ROOT"/deploy/clusterlink-network-manager.yml - kubectl --context="kind-${clustername}" apply -f "${ROOT}/environments/clusterlink-network-manager.yml" + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "${ROOT}/environments/clusterlink-network-manager.yml" echo "cluster $clustername deploy clusterlink success" sed -e "s|__VERSION__|$VERSION|g" -e "s|__CERT__|$CERT|g" -e "s|__KEY__|$KEY|g" -e "w ${ROOT}/environments/clustertree-cluster-manager.yml" "$ROOT"/deploy/clustertree-cluster-manager.yml - kubectl --context="kind-${clustername}" apply -f "${ROOT}/environments/clustertree-cluster-manager.yml" + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "${ROOT}/environments/clustertree-cluster-manager.yml" echo "cluster $clustername deploy clustertree success" - kubectl --context="kind-${clustername}" -n kosmos-system delete secret controlpanel-config || true - kubectl --context="kind-${clustername}" -n kosmos-system create secret generic controlpanel-config --from-file=kubeconfig="${ROOT}/environments/cluster-host/kubeconfig" + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig -n kosmos-system delete secret controlpanel-config || true + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig -n kosmos-system create secret generic controlpanel-config --from-file=kubeconfig="${ROOT}/environments/cluster-host/kubeconfig" sed -e "s|__VERSION__|$VERSION|g" -e "w ${ROOT}/environments/clusterlink-operator.yml" "$ROOT"/deploy/clusterlink-operator.yml - kubectl --context="kind-${clustername}" apply -f "${ROOT}/environments/clusterlink-operator.yml" + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "${ROOT}/environments/clusterlink-operator.yml" echo "cluster $clustername deploy clusterlink-operator success" sed -e "s|__VERSION__|$VERSION|g" -e "w ${ROOT}/environments/kosmos-scheduler.yml" "$ROOT"/deploy/scheduler/deployment.yaml - kubectl --context="kind-${clustername}" apply -f "${ROOT}/environments/kosmos-scheduler.yml" - kubectl --context="kind-${clustername}" apply -f "$ROOT"/deploy/scheduler/rbac.yaml + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "${ROOT}/environments/kosmos-scheduler.yml" + kubectl --kubeconfig $CLUSTER_DIR/kubeconfig apply -f "$ROOT"/deploy/scheduler/rbac.yaml util::wait_for_condition "kosmos scheduler are ready" \ - "kubectl --context="kind-${clustername}" -n kosmos-system get deploy kosmos-scheduler -o jsonpath='{.status.replicas}{\" \"}{.status.readyReplicas}{\"\n\"}' | awk '{if (\$1 == \$2 && \$1 > 0) exit 0; else exit 1}'" \ + "kubectl --kubeconfig $CLUSTER_DIR/kubeconfig -n kosmos-system get deploy kosmos-scheduler -o jsonpath='{.status.replicas}{\" \"}{.status.readyReplicas}{\"\n\"}' | awk '{if (\$1 == \$2 && \$1 > 0) exit 0; else exit 1}'" \ 300 echo "cluster $clustername deploy kosmos-scheduler success" @@ -298,9 +259,10 @@ function load_cluster_images() { } function delete_cluster() { - local -r clustername=$1 - kind delete clusters $clustername - CLUSTER_DIR="${ROOT}/environments/${clustername}" - rm -rf "$CLUSTER_DIR" - echo "cluster $clustername delete success" -} + local -r clusterName=$1 + local -r clusterDir=$2 + + kind delete clusters "${clusterName}" + rm -rf "${clusterDir}" + echo "cluster $clusterName delete success" +} \ No newline at end of file diff --git a/hack/clustertemplete/kindconfig b/hack/clustertemplete/kindconfig index 7cae75941..3ffe86a2a 100644 --- a/hack/clustertemplete/kindconfig +++ 
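With the signature change, `delete_cluster` no longer derives the environment directory itself, so callers pass both arguments explicitly; for example:

```bash
# Two-argument form: kind cluster name first, then the environments dir to purge.
delete_cluster "cluster-member1" "${ROOT}/environments/cluster-member1"
```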
diff --git a/hack/clustertemplete/kindconfig b/hack/clustertemplete/kindconfig
index 7cae75941..3ffe86a2a 100644
--- a/hack/clustertemplete/kindconfig
+++ b/hack/clustertemplete/kindconfig
@@ -17,4 +17,5 @@ networking:
   ipFamily: __IP_FAMILY__
   disableDefaultCNI: true # disable kindnet
   podSubnet: __POD_CIDR__
-  serviceSubnet: __SERVICE_CIDR__
\ No newline at end of file
+  serviceSubnet: __SERVICE_CIDR__
+  apiServerAddress: __HOST_IPADDRESS__
diff --git a/hack/local-cleanup-kosmos.sh b/hack/local-cleanup-kosmos.sh
new file mode 100755
index 000000000..400c30f8f
--- /dev/null
+++ b/hack/local-cleanup-kosmos.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function usage() {
+  echo "Usage:"
+  echo "    hack/local-cleanup-kosmos.sh [-k] [-h]"
+  echo "Args:"
+  echo "    k: keep the local images"
+  echo "    h: print help information"
+}
+
+keep_images="false"
+while getopts 'kh' OPT; do
+  case $OPT in
+    k) keep_images="true";;
+    h)
+      usage
+      exit 0
+      ;;
+    ?)
+      usage
+      exit 1
+      ;;
+  esac
+done
+
+HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"cluster-host"}
+MEMBER_CLUSTER_1_NAME=${MEMBER_CLUSTER_1_NAME:-"cluster-member1"}
+MEMBER_CLUSTER_2_NAME=${MEMBER_CLUSTER_2_NAME:-"cluster-member2"}
+MEMBER_CLUSTER_3_NAME=${MEMBER_CLUSTER_3_NAME:-"cluster-member3"}
+
+#step1 remove kind clusters
+echo -e "\nStart removing kind clusters"
+kind delete cluster --name "${HOST_CLUSTER_NAME}"
+kind delete cluster --name "${MEMBER_CLUSTER_1_NAME}"
+kind delete cluster --name "${MEMBER_CLUSTER_2_NAME}"
+kind delete cluster --name "${MEMBER_CLUSTER_3_NAME}"
+echo "Remove kind clusters successfully."
+
+ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+CLUSTER_DIR="${ROOT}/environments"
+source "${ROOT}/hack/cluster.sh"
+
+#step2. remove kubeconfig
+echo -e "\nStart removing kubeconfig, kindconfig, calicoconfig"
+HOST_CLUSTER_CONFIG=${HOST_CLUSTER_CONFIG:-"${CLUSTER_DIR}/${HOST_CLUSTER_NAME}"}
+MEMBER1_CLUSTER_CONFIG=${MEMBER1_CLUSTER_CONFIG:-"${CLUSTER_DIR}/${MEMBER_CLUSTER_1_NAME}"}
+MEMBER2_CLUSTER_CONFIG=${MEMBER2_CLUSTER_CONFIG:-"${CLUSTER_DIR}/${MEMBER_CLUSTER_2_NAME}"}
+MEMBER3_CLUSTER_CONFIG=${MEMBER3_CLUSTER_CONFIG:-"${CLUSTER_DIR}/${MEMBER_CLUSTER_3_NAME}"}
+delete_cluster "${HOST_CLUSTER_NAME}" "${HOST_CLUSTER_CONFIG}"
+delete_cluster "${MEMBER_CLUSTER_1_NAME}" "${MEMBER1_CLUSTER_CONFIG}"
+delete_cluster "${MEMBER_CLUSTER_2_NAME}" "${MEMBER2_CLUSTER_CONFIG}"
+delete_cluster "${MEMBER_CLUSTER_3_NAME}" "${MEMBER3_CLUSTER_CONFIG}"
+
+echo "Remove cluster configs successfully."
+
+#step3. remove docker images
+echo -e "\nStart removing images"
+version="v0.2.0"
+registry="ghcr.io/kosmos-io"
+images=(
+"${registry}/clusterlink-network-manager:${version}"
+"${registry}/clusterlink-controller-manager:${version}"
+"${registry}/clusterlink-elector:${version}"
+"${registry}/clusterlink-operator:${version}"
+"${registry}/clusterlink-agent:${version}"
+"${registry}/clusterlink-proxy:${version}"
+"${registry}/clustertree-cluster-manager:${version}"
+"${registry}/scheduler:${version}"
+)
+if [[ "${keep_images}" == "false" ]] ; then
+  for ((i=0;i<${#images[*]};i++)); do
+    docker rmi ${images[i]} || true
+  done
+  echo "Remove images successfully."
+else
+  echo "Skip removing images as required."
+fi
+
+echo -e "\nLocal Kosmos is removed successfully."
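Typical teardown invocations of the new script (the `-k` flag is defined by the getopts loop above):

```bash
# Full teardown, including locally built Kosmos images:
hack/local-cleanup-kosmos.sh

# Tear down clusters and configs but keep images for a faster next run:
hack/local-cleanup-kosmos.sh -k
```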
diff --git a/hack/local-down-clusterlink.sh b/hack/local-down-clusterlink.sh
deleted file mode 100755
index 5faad623c..000000000
--- a/hack/local-down-clusterlink.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-HOST_CLUSTER_NAME="cluster-host"
-
-MEMBER1_CLUSTER_NAME="cluster-member1"
-MEMBER2_CLUSTER_NAME="cluster-member2"
-
-ROOT="$(dirname "${BASH_SOURCE[0]}")"
-source "$(dirname "${BASH_SOURCE[0]}")/cluster.sh"
-
-#cluster cluster
-delete_cluster $HOST_CLUSTER_NAME
-delete_cluster $MEMBER1_CLUSTER_NAME
-delete_cluster $MEMBER2_CLUSTER_NAME
-
-
-echo "clusterlink local down success"
diff --git a/hack/local-up-clusterlink.sh b/hack/local-up-clusterlink.sh
deleted file mode 100755
index 1f1a6db79..000000000
--- a/hack/local-up-clusterlink.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-HOST_CLUSTER_NAME="cluster-host"
-HOST_CLUSTER_POD_CIDR="10.233.64.0/18"
-HOST_CLUSTER_SERVICE_CIDR="10.233.0.0/18"
-
-MEMBER1_CLUSTER_NAME="cluster-member1"
-MEMBER1_CLUSTER_POD_CIDR="10.234.64.0/18"
-MEMBER1_CLUSTER_SERVICE_CIDR="10.234.0.0/18"
-
-MEMBER2_CLUSTER_NAME="cluster-member2"
-MEMBER2_CLUSTER_POD_CIDR="10.235.64.0/18"
-MEMBER2_CLUSTER_SERVICE_CIDR="10.235.0.0/18"
-
-export VERSION="latest"
-ROOT="$(dirname "${BASH_SOURCE[0]}")"
-source "$(dirname "${BASH_SOURCE[0]}")/install_kind_kubectl.sh"
-source "$(dirname "${BASH_SOURCE[0]}")/cluster.sh"
-make images GOOS="linux" --directory="${ROOT}"
-
-#cluster cluster
-create_cluster $HOST_CLUSTER_NAME $HOST_CLUSTER_POD_CIDR $HOST_CLUSTER_SERVICE_CIDR true
-create_cluster $MEMBER1_CLUSTER_NAME $MEMBER1_CLUSTER_POD_CIDR $MEMBER1_CLUSTER_SERVICE_CIDR true
-create_cluster $MEMBER2_CLUSTER_NAME $MEMBER2_CLUSTER_POD_CIDR $MEMBER2_CLUSTER_SERVICE_CIDR true
-
-#deploy cluster
-deploy_cluster $HOST_CLUSTER_NAME
-load_cluster_images $MEMBER1_CLUSTER_NAME
-load_cluster_images $MEMBER2_CLUSTER_NAME
-
-#join cluster
-join_cluster $HOST_CLUSTER_NAME $HOST_CLUSTER_NAME
-join_cluster $HOST_CLUSTER_NAME $MEMBER1_CLUSTER_NAME
-join_cluster $HOST_CLUSTER_NAME $MEMBER2_CLUSTER_NAME
-
-echo "cluster local start success enjoy it!"
-
diff --git a/hack/local-up-kosmos.sh b/hack/local-up-kosmos.sh
new file mode 100755
index 000000000..f99d4892c
--- /dev/null
+++ b/hack/local-up-kosmos.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+
+function usage() {
+  echo "Usage:"
+  echo "    hack/local-up-kosmos.sh [HOST_IPADDRESS] [-h]"
+  echo "Args:"
+  echo "    HOST_IPADDRESS: bind every cluster's API server to this reachable IP address (required on macOS, optional elsewhere)"
+  echo "    h: print help information"
+}
+
+while getopts 'h' OPT; do
+  case $OPT in
+    h)
+      usage
+      exit 0
+      ;;
+    ?)
+      usage
+      exit 1
+      ;;
+  esac
+done
+
+
+KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
+export KUBECONFIG=$KUBECONFIG_PATH/"config"
+
+KIND_IMAGE=${KIND_IMAGE:-"kindest/node:v1.27.2"}
+HOST_IPADDRESS=${1:-}
+HOST_CLUSTER_NAME="cluster-host"
+HOST_CLUSTER_POD_CIDR="10.233.64.0/18"
+HOST_CLUSTER_SERVICE_CIDR="10.233.0.0/18"
+
+MEMBER1_CLUSTER_NAME="cluster-member1"
+MEMBER1_CLUSTER_POD_CIDR="10.234.64.0/18"
+MEMBER1_CLUSTER_SERVICE_CIDR="10.234.0.0/18"
+
+MEMBER2_CLUSTER_NAME="cluster-member2"
+MEMBER2_CLUSTER_POD_CIDR="10.235.64.0/18"
+MEMBER2_CLUSTER_SERVICE_CIDR="10.235.0.0/18"
+
+MEMBER3_CLUSTER_NAME="cluster-member3"
+MEMBER3_CLUSTER_POD_CIDR="10.236.64.0/18"
+MEMBER3_CLUSTER_SERVICE_CIDR="10.236.0.0/18"
+
+ROOT="$(dirname "${BASH_SOURCE[0]}")"
+export VERSION="latest"
+source "$(dirname "${BASH_SOURCE[0]}")/install_kind_kubectl.sh"
+source "$(dirname "${BASH_SOURCE[0]}")/cluster.sh"
+source "$(dirname "${BASH_SOURCE[0]}")/util.sh"
+
+#step1. create host cluster and member clusters in parallel
+# host IP address: the script argument takes precedence over the detected macOS IP
+if [[ -z "${HOST_IPADDRESS}" ]]; then
+  util::get_macos_ipaddress # Adapt for macOS
+  HOST_IPADDRESS=${MAC_NIC_IPADDRESS:-}
+fi
+make images GOOS="linux" --directory="${ROOT}"
+
+make kosmosctl
+os=$(go env GOOS)
+arch=$(go env GOARCH)
+export PATH=$PATH:"$ROOT"/_output/bin/"$os"/"$arch"
+
+# prepare docker image
+prepare_docker_image
+
+# create clusters concurrently in the background
+create_cluster "${KIND_IMAGE}" "$HOST_IPADDRESS" $HOST_CLUSTER_NAME $HOST_CLUSTER_POD_CIDR $HOST_CLUSTER_SERVICE_CIDR &
+create_cluster "${KIND_IMAGE}" "$HOST_IPADDRESS" $MEMBER1_CLUSTER_NAME $MEMBER1_CLUSTER_POD_CIDR $MEMBER1_CLUSTER_SERVICE_CIDR false &
+create_cluster "${KIND_IMAGE}" "$HOST_IPADDRESS" $MEMBER2_CLUSTER_NAME $MEMBER2_CLUSTER_POD_CIDR $MEMBER2_CLUSTER_SERVICE_CIDR false &
+create_cluster "${KIND_IMAGE}" "$HOST_IPADDRESS" $MEMBER3_CLUSTER_NAME $MEMBER3_CLUSTER_POD_CIDR $MEMBER3_CLUSTER_SERVICE_CIDR false &
+
+# wait for finish
+wait
+
+# deploy the host cluster and load member images concurrently in the background
+deploy_cluster_by_ctl $HOST_CLUSTER_NAME "${ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig" "${ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig-nodeIp" &
+load_cluster_images $MEMBER1_CLUSTER_NAME &
+load_cluster_images $MEMBER2_CLUSTER_NAME &
+load_cluster_images $MEMBER3_CLUSTER_NAME &
+
+# wait for finish
+wait
+
+#join cluster
+join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER1_CLUSTER_NAME "${ROOT}/environments/${HOST_CLUSTER_NAME}" "${ROOT}/environments/${MEMBER1_CLUSTER_NAME}"
+join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER2_CLUSTER_NAME "${ROOT}/environments/${HOST_CLUSTER_NAME}" "${ROOT}/environments/${MEMBER2_CLUSTER_NAME}"
+join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER3_CLUSTER_NAME "${ROOT}/environments/${HOST_CLUSTER_NAME}" "${ROOT}/environments/${MEMBER3_CLUSTER_NAME}"
+
+#add leafnode test taint
+addTaint $HOST_CLUSTER_NAME $MEMBER3_CLUSTER_NAME
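For reference, a typical invocation on a Linux host; the address is illustrative, and on macOS the script prompts for one via `util::get_macos_ipaddress` instead:

```bash
# Bind every kind API server to a reachable address so the host and member
# clusters can talk to each other, and pin the node image explicitly.
KIND_IMAGE="kindest/node:v1.27.2" hack/local-up-kosmos.sh 192.168.1.10
```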
diff --git a/hack/prepare-e2e.sh b/hack/prepare-e2e.sh
deleted file mode 100755
index f038ee6c6..000000000
--- a/hack/prepare-e2e.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
-export KUBECONFIG=$KUBECONFIG_PATH/"config"
-
-HOST_CLUSTER_NAME="cluster-host"
-HOST_CLUSTER_POD_CIDR="10.233.64.0/18"
-HOST_CLUSTER_SERVICE_CIDR="10.233.0.0/18"
-
-MEMBER1_CLUSTER_NAME="cluster-member1"
-MEMBER1_CLUSTER_POD_CIDR="10.234.64.0/18"
-MEMBER1_CLUSTER_SERVICE_CIDR="10.234.0.0/18"
-
-MEMBER2_CLUSTER_NAME="cluster-member2"
-MEMBER2_CLUSTER_POD_CIDR="10.235.64.0/18"
-MEMBER2_CLUSTER_SERVICE_CIDR="10.235.0.0/18"
-
-MEMBER3_CLUSTER_NAME="cluster-member3"
-MEMBER3_CLUSTER_POD_CIDR="10.236.64.0/18"
-MEMBER3_CLUSTER_SERVICE_CIDR="10.236.0.0/18"
-
-ROOT="$(dirname "${BASH_SOURCE[0]}")"
-export VERSION="latest"
-source "$(dirname "${BASH_SOURCE[0]}")/install_kind_kubectl.sh"
-source "$(dirname "${BASH_SOURCE[0]}")/cluster.sh"
-make images GOOS="linux" --directory="${ROOT}"
-
-make kosmosctl
-os=$(go env GOOS)
-arch=$(go env GOARCH)
-export PATH=$PATH:"$ROOT"/_output/bin/"$os"/"$arch"
-
-#cluster cluster
-create_cluster $HOST_CLUSTER_NAME $HOST_CLUSTER_POD_CIDR $HOST_CLUSTER_SERVICE_CIDR
-create_cluster $MEMBER1_CLUSTER_NAME $MEMBER1_CLUSTER_POD_CIDR $MEMBER1_CLUSTER_SERVICE_CIDR false
-create_cluster $MEMBER2_CLUSTER_NAME $MEMBER2_CLUSTER_POD_CIDR $MEMBER2_CLUSTER_SERVICE_CIDR fasle
-create_cluster $MEMBER3_CLUSTER_NAME $MEMBER3_CLUSTER_POD_CIDR $MEMBER3_CLUSTER_SERVICE_CIDR fasle
-
-#deploy cluster
-deploy_cluster_by_ctl $HOST_CLUSTER_NAME
-load_cluster_images $MEMBER1_CLUSTER_NAME
-load_cluster_images $MEMBER2_CLUSTER_NAME
-load_cluster_images $MEMBER3_CLUSTER_NAME
-
-#join cluster
-join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER1_CLUSTER_NAME
-join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER2_CLUSTER_NAME
-join_cluster_by_ctl $HOST_CLUSTER_NAME $MEMBER3_CLUSTER_NAME
-
-#add leafnode test taint
-addTaint $HOST_CLUSTER_NAME $MEMBER3_CLUSTER_NAME
source "${ROOT}/util.sh" +source "${ROOT}/cluster.sh" +mkdir -p "$ARTIFACTS_PATH" + +# prepare for e2e test +prepare_e2e_cluster "${HOST_CLUSTER_NAME}" & +prepare_e2e_cluster "${MEMBER1_CLUSTER_NAME}" & +prepare_e2e_cluster "${MEMBER2_CLUSTER_NAME}" & +prepare_e2e_cluster "${MEMBER3_CLUSTER_NAME}" & +wait # e2e for nginx and mcs -kubectl --context="kind-${HOST_CLUSTER_NAME}" apply -f "${ROOT}"/../test/e2e/deploy/nginx +kubectl --kubeconfig "${REPO_ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig" apply -f "${REPO_ROOT}"/test/e2e/deploy/nginx util::wait_for_condition "nginx are ready" \ - "kubectl --context=kind-${HOST_CLUSTER_NAME} -n ${E2E_NAMESPACE} get pod -l app=nginx | awk 'NR>1 {if (\$3 == \"Running\") exit 0; else exit 1; }'" \ + "kubectl --kubeconfig ${REPO_ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig -n ${E2E_NAMESPACE} get pod -l app=nginx | awk 'NR>1 {if (\$3 == \"Running\") exit 0; else exit 1; }'" \ 120 util::wait_for_condition "mcs of member1 are ready" \ - "[ \$(kubectl --context=kind-${MEMBER1_CLUSTER_NAME} -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \ + "[ \$(kubectl --kubeconfig ${REPO_ROOT}/environments/${MEMBER1_CLUSTER_NAME}/kubeconfig -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \ 120 util::wait_for_condition "mcs of member2 are ready" \ - "[ \$(kubectl --context=kind-${MEMBER2_CLUSTER_NAME} -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \ + "[ \$(kubectl --kubeconfig ${REPO_ROOT}/environments/${MEMBER2_CLUSTER_NAME}/kubeconfig -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \ 120 util::wait_for_condition "mcs of member3 are ready" \ - "[ \$(kubectl --context=kind-${MEMBER3_CLUSTER_NAME} -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \ + "[ \$(kubectl --kubeconfig ${REPO_ROOT}/environments/${MEMBER3_CLUSTER_NAME}/kubeconfig -n ${E2E_NAMESPACE} get endpointslices.discovery.k8s.io --no-headers -l kubernetes.io\/service-name=nginx-service | wc -l) -eq 1 ] " \ 120 nginx_service_ip=$(kubectl -n kosmos-e2e get svc nginx-service -o=jsonpath='{.spec.clusterIP}') @@ -43,18 +54,19 @@ sleep 100 && docker exec -i ${HOST_CLUSTER_NAME}-control-plane sh -c "curl -sSf exit 1 } -## e2e for mysql-operator -#kubectl --context="kind-cluster-host" apply -f "${ROOT}"/../test/e2e/deploy/mysql-operator -#util::wait_for_condition "mysql operator are ready" \ -# "kubectl --context=kind-${HOST_CLUSTER_NAME} get pods -n mysql-operator mysql-operator-0 | awk 'NR>1 {if (\$3 == \"Running\") exit 0; else exit 1; }'" \ -# 300 -#kubectl --context="kind-${HOST_CLUSTER_NAME}" apply -f "${ROOT}"/../test/e2e/deploy/cr +# e2e for mysql-operator +echo "apply mysql-operator on cluster ${HOST_CLUSTER_NAME} with files in path ${REPO_ROOT}/test/e2e/deploy/mysql-operator" +kubectl --kubeconfig "${REPO_ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig" apply -f "${REPO_ROOT}"/test/e2e/deploy/mysql-operator +util::wait_for_condition "mysql operator are ready" \ + "kubectl --kubeconfig "${REPO_ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig" get pods -n mysql-operator mysql-operator-0 | awk 'NR>1 {if (\$3 == \"Running\") exit 0; else exit 1; }'" \ + 300 +kubectl --kubeconfig 
"${REPO_ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig" apply -f "${REPO_ROOT}"/test/e2e/deploy/cr -#util::wait_for_condition "mysql cr are ready" \ -# "[ \$(kubectl --context="kind-${HOST_CLUSTER_NAME}" get pods -n kosmos-e2e --field-selector=status.phase=Running -l app.kubernetes.io/name=mysql --no-headers | wc -l) -eq 2 ]" \ -# 1200 +util::wait_for_condition "mysql cr are ready" \ + "[ \$(kubectl --kubeconfig ${REPO_ROOT}/environments/${HOST_CLUSTER_NAME}/kubeconfig get pods -n kosmos-e2e --field-selector=status.phase=Running -l app.kubernetes.io/name=mysql --no-headers | wc -l) -eq 2 ]" \ + 1200 -#echo "E2e test of mysql-operator success" +echo "E2e test of mysql-operator success" # Install ginkgo GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo @@ -63,26 +75,24 @@ set +e ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/ -- TESTING_RESULT=$? -LOG_PATH=$ROOT/../e2e-logs -echo "Collect logs to $LOG_PATH..." -mkdir -p "$LOG_PATH" +# Collect logs +echo "Collect logs to $ARTIFACTS_PATH..." +cp -r "${REPO_ROOT}/environments" "$ARTIFACTS_PATH" -echo "Collecting $HOST_CLUSTER_NAME logs..." -mkdir -p "$LOG_PATH/$HOST_CLUSTER_NAME" -kind export logs --name="$HOST_CLUSTER_NAME" "$LOG_PATH/$HOST_CLUSTER_NAME" +echo "Collecting Kind logs..." +mkdir -p "$ARTIFACTS_PATH/$HOST_CLUSTER_NAME" +kind export logs --name="$HOST_CLUSTER_NAME" "$ARTIFACTS_PATH/$HOST_CLUSTER_NAME" -echo "Collecting $MEMBER1_CLUSTER_NAME logs..." -mkdir -p "$MEMBER1_CLUSTER_NAME/$MEMBER1_CLUSTER_NAME" -kind export logs --name="$MEMBER1_CLUSTER_NAME" "$LOG_PATH/$MEMBER1_CLUSTER_NAME" +mkdir -p "$ARTIFACTS_PATH/$MEMBER1_CLUSTER_NAME" +kind export logs --name="$MEMBER1_CLUSTER_NAME" "$ARTIFACTS_PATH/$MEMBER1_CLUSTER_NAME" -echo "Collecting $MEMBER2_CLUSTER_NAME logs..." -mkdir -p "$MEMBER2_CLUSTER_NAME/$MEMBER2_CLUSTER_NAME" -kind export logs --name="$MEMBER2_CLUSTER_NAME" "$LOG_PATH/$MEMBER2_CLUSTER_NAME" +mkdir -p "$ARTIFACTS_PATH/$MEMBER2_CLUSTER_NAME" +kind export logs --name="$MEMBER2_CLUSTER_NAME" "$ARTIFACTS_PATH/$MEMBER2_CLUSTER_NAME" -echo "Collecting $MEMBER3_CLUSTER_NAME logs..." -mkdir -p "$MEMBER3_CLUSTER_NAME/$MEMBER3_CLUSTER_NAME" -kind export logs --name="$MEMBER3_CLUSTER_NAME" "$LOG_PATH/$MEMBER3_CLUSTER_NAME" +mkdir -p "$ARTIFACTS_PATH/$MEMBER3_CLUSTER_NAME" +kind export logs --name="$MEMBER3_CLUSTER_NAME" "$ARTIFACTS_PATH/$MEMBER3_CLUSTER_NAME" -#TODO delete cluster +echo "Collected logs at $ARTIFACTS_PATH:" +ls -al "$ARTIFACTS_PATH" exit $TESTING_RESULT \ No newline at end of file diff --git a/hack/util.sh b/hack/util.sh index bca5811c1..b2167f52c 100755 --- a/hack/util.sh +++ b/hack/util.sh @@ -517,7 +517,7 @@ function util::wait_for_crd() { for crd_name in "${crd_names[@]}"; do if kubectl get crd "$crd_name"; then echo "CRD $crd_name has been stored successfully." - # 从要等待的CRD列表中删除已经存储的CRD + # delete crd from waiting list count=$(($count+1)) fi done @@ -542,4 +542,57 @@ function util::go_clean_cache() { go clean -cache set +x +} + +# get base64 from kubeconfig file +function util::get_base64_kubeconfig() { + local os_type=$(uname) + + if [ "$os_type" == "Linux" ]; then + # Linux + base64 -w 0 < "$1" + elif [ "$os_type" == "Darwin" ]; then + # macOS + base64 -b 0 < "$1" + else + echo "Unsupported operating system" + return 1 + fi +} + +# verify input ip is valid or not +function util::verify_ip_address { + IPADDRESS=${1} + if [[ ! 
"${IPADDRESS}" =~ ^(([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))\.){3}([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))$ ]]; then + echo -e "\nError: invalid IP address" + exit 1 + fi +} + +# util::get_macos_ipaddress will get ip address on macos interactively, store to 'MAC_NIC_IPADDRESS' if available +MAC_NIC_IPADDRESS='' +function util::get_macos_ipaddress() { + if [[ $(go env GOOS) = "darwin" ]]; then + tmp_ip=$(ipconfig getifaddr en0 || true) + echo "" + echo " Detected that you are installing KOSMOS on macOS " + echo "" + echo "It needs a Macintosh IP address to bind Kind Api Server Address," + echo "so you can access it from you macOS and the inner kubeconfig for cluster should use --inner-kubeconfig" + echo "the --inner-kubeconfig should use nodeIp so the host-cluster and member-cluster can be connected" + echo -n "input an available IP, " + if [[ -z ${tmp_ip} ]]; then + echo "you can use the command 'ifconfig' to look for one" + tips_msg="[Enter IP address]:" + else + echo "default IP will be en0 inet addr if exists" + tips_msg="[Enter for default ${tmp_ip}]:" + fi + read -r -p "${tips_msg}" MAC_NIC_IPADDRESS + MAC_NIC_IPADDRESS=${MAC_NIC_IPADDRESS:-$tmp_ip} + util::verify_ip_address "${MAC_NIC_IPADDRESS}" + echo "Using IP address: ${MAC_NIC_IPADDRESS}" + else # non-macOS + MAC_NIC_IPADDRESS=${MAC_NIC_IPADDRESS:-} + fi } \ No newline at end of file diff --git a/pkg/kosmosctl/install/install.go b/pkg/kosmosctl/install/install.go index fae738b5a..3e6e44f58 100644 --- a/pkg/kosmosctl/install/install.go +++ b/pkg/kosmosctl/install/install.go @@ -3,6 +3,7 @@ package install import ( "context" "fmt" + "strings" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" @@ -44,14 +45,17 @@ var installExample = templates.Examples(i18n.T(` kosmosctl install -m coredns`)) type CommandInstallOptions struct { - Namespace string - ImageRegistry string - Version string - Module string - HostKubeConfig string - HostContext string - HostKubeConfigStream []byte - WaitTime int + Namespace string + ImageRegistry string + Version string + Module string + HostKubeConfig string + InnerKubeConfig string + HostContext string + HostKubeConfigStream []byte + InnerKubeConfigStream []byte + + WaitTime int CNI string DefaultNICName string @@ -92,6 +96,7 @@ func NewCmdInstall() *cobra.Command { flags.StringVar(&o.Version, "version", "", "image version for pull images") flags.StringVarP(&o.Module, "module", "m", utils.All, "Kosmos specify the module to install.") flags.StringVar(&o.HostKubeConfig, "kubeconfig", "", "Absolute path to the special kubeconfig file.") + flags.StringVar(&o.InnerKubeConfig, "inner-kubeconfig", "", "Absolute path to the special inner kubeconfig file for cluster resource to use.") flags.StringVar(&o.HostContext, "context", "", "The name of the kubeconfig context.") flags.StringVar(&o.CNI, "cni", "", "The cluster is configured using cni and currently supports calico and flannel.") flags.StringVar(&o.DefaultNICName, "default-nic", "", "Set default network interface card.") @@ -107,6 +112,18 @@ func NewCmdInstall() *cobra.Command { } func (o *CommandInstallOptions) Complete() error { + // if innerKubeconfig is not null init the InnerKubeconfigStream + if o.InnerKubeConfig != "" && len(strings.TrimSpace(o.InnerKubeConfig)) != 0 { + innerRawConfig, err := utils.RawConfig(o.InnerKubeConfig, o.HostContext) + if err != nil { + return fmt.Errorf("kosmosctl install complete error, read inner 
diff --git a/pkg/kosmosctl/install/install.go b/pkg/kosmosctl/install/install.go
index fae738b5a..3e6e44f58 100644
--- a/pkg/kosmosctl/install/install.go
+++ b/pkg/kosmosctl/install/install.go
@@ -3,6 +3,7 @@ package install
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/spf13/cobra"
 	corev1 "k8s.io/api/core/v1"
@@ -44,14 +45,17 @@ var installExample = templates.Examples(i18n.T(`
 	kosmosctl install -m coredns`))
 
 type CommandInstallOptions struct {
-	Namespace            string
-	ImageRegistry        string
-	Version              string
-	Module               string
-	HostKubeConfig       string
-	HostContext          string
-	HostKubeConfigStream []byte
-	WaitTime             int
+	Namespace             string
+	ImageRegistry         string
+	Version               string
+	Module                string
+	HostKubeConfig        string
+	InnerKubeConfig       string
+	HostContext           string
+	HostKubeConfigStream  []byte
+	InnerKubeConfigStream []byte
+
+	WaitTime int
 
 	CNI            string
 	DefaultNICName string
@@ -92,6 +96,7 @@ func NewCmdInstall() *cobra.Command {
 	flags.StringVar(&o.Version, "version", "", "image version for pull images")
 	flags.StringVarP(&o.Module, "module", "m", utils.All, "Kosmos specify the module to install.")
 	flags.StringVar(&o.HostKubeConfig, "kubeconfig", "", "Absolute path to the special kubeconfig file.")
+	flags.StringVar(&o.InnerKubeConfig, "inner-kubeconfig", "", "Absolute path to the special inner kubeconfig file for cluster resource to use.")
 	flags.StringVar(&o.HostContext, "context", "", "The name of the kubeconfig context.")
 	flags.StringVar(&o.CNI, "cni", "", "The cluster is configured using cni and currently supports calico and flannel.")
 	flags.StringVar(&o.DefaultNICName, "default-nic", "", "Set default network interface card.")
@@ -107,6 +112,18 @@ func NewCmdInstall() *cobra.Command {
 }
 
 func (o *CommandInstallOptions) Complete() error {
+	// if InnerKubeConfig is set, initialize InnerKubeConfigStream from it
+	if len(strings.TrimSpace(o.InnerKubeConfig)) != 0 {
+		innerRawConfig, err := utils.RawConfig(o.InnerKubeConfig, o.HostContext)
+		if err != nil {
+			return fmt.Errorf("kosmosctl install complete error, read inner kubeconfig failed: %s", err)
+		}
+		o.InnerKubeConfigStream, err = clientcmd.Write(innerRawConfig)
+		if err != nil {
+			return fmt.Errorf("kosmosctl install complete error, generate inner kubeconfig stream failed: %s", err)
+		}
+		klog.Infof("using inner kubeconfig from path %s", o.InnerKubeConfig)
+	}
 	config, err := utils.RestConfig(o.HostKubeConfig, o.HostContext)
 	if err != nil {
 		return fmt.Errorf("kosmosctl install complete error, generate host config failed: %s", err)
@@ -407,7 +424,12 @@ func (o *CommandInstallOptions) runClustertree() error {
 			Namespace: o.Namespace,
 		},
 		Data: map[string]string{
-			"kubeconfig": string(o.HostKubeConfigStream),
+			"kubeconfig": string(func() []byte {
+				if len(o.InnerKubeConfigStream) != 0 {
+					return o.InnerKubeConfigStream
+				}
+				return o.HostKubeConfigStream
+			}()),
 		},
 	}
 	_, err = o.K8sClient.CoreV1().ConfigMaps(o.Namespace).Create(context.TODO(), clustertreeConfigMap, metav1.CreateOptions{})
@@ -504,7 +526,12 @@ func (o *CommandInstallOptions) createOperator() error {
 			Namespace: o.Namespace,
 		},
 		Data: map[string][]byte{
-			"kubeconfig": o.HostKubeConfigStream,
+			"kubeconfig": func() []byte {
+				if len(o.InnerKubeConfigStream) != 0 {
+					return o.InnerKubeConfigStream
+				}
+				return o.HostKubeConfigStream
+			}(),
 		},
 	}
 	_, err = o.K8sClient.CoreV1().Secrets(operatorSecret.Namespace).Create(context.TODO(), operatorSecret, metav1.CreateOptions{})
@@ -557,15 +584,16 @@ func (o *CommandInstallOptions) createControlCluster() error {
 
 	clusterArgs := []string{"cluster"}
 	joinOptions := join.CommandJoinOptions{
-		Name:                utils.DefaultClusterName,
-		Namespace:           o.Namespace,
-		ImageRegistry:       o.ImageRegistry,
-		KubeConfigStream:    o.HostKubeConfigStream,
-		WaitTime:            o.WaitTime,
-		KosmosClient:        o.KosmosClient,
-		K8sClient:           o.K8sClient,
-		K8sExtensionsClient: o.K8sExtensionsClient,
-		RootFlag:            true,
+		Name:                  utils.DefaultClusterName,
+		Namespace:             o.Namespace,
+		ImageRegistry:         o.ImageRegistry,
+		KubeConfigStream:      o.HostKubeConfigStream,
+		InnerKubeconfigStream: o.InnerKubeConfigStream,
+		WaitTime:              o.WaitTime,
+		KosmosClient:          o.KosmosClient,
+		K8sClient:             o.K8sClient,
+		K8sExtensionsClient:   o.K8sExtensionsClient,
+		RootFlag:              true,
 	}
 
 	switch o.Module {
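How the two kubeconfigs divide the work at install time, sketched from the local-up flow (paths are illustrative):

```bash
# --kubeconfig drives the API calls kosmosctl itself makes; --inner-kubeconfig
# (the node-IP variant written by create_cluster) is what gets embedded into
# the controlpanel secret/configmap so in-cluster components can reach the
# API server from inside kind.
kosmosctl install \
  --kubeconfig ./environments/cluster-host/kubeconfig \
  --inner-kubeconfig ./environments/cluster-host/kubeconfig-nodeIp
```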
diff --git a/pkg/kosmosctl/join/join.go b/pkg/kosmosctl/join/join.go
index 330928092..838322cdb 100644
--- a/pkg/kosmosctl/join/join.go
+++ b/pkg/kosmosctl/join/join.go
@@ -39,19 +39,21 @@ var joinExample = templates.Examples(i18n.T(`
 `))
 
 type CommandJoinOptions struct {
-	Name                 string
-	Namespace            string
-	ImageRegistry        string
-	Version              string
-	KubeConfig           string
-	Context              string
-	KubeConfigStream     []byte
-	HostKubeConfig       string
-	HostContext          string
-	HostKubeConfigStream []byte
-	WaitTime             int
-	RootFlag             bool
-	EnableAll            bool
+	Name                  string
+	Namespace             string
+	ImageRegistry         string
+	Version               string
+	KubeConfig            string
+	Context               string
+	KubeConfigStream      []byte
+	InnerKubeconfigStream []byte
+	HostKubeConfig        string
+	InnerKubeConfig       string
+	HostContext           string
+	HostKubeConfigStream  []byte
+	WaitTime              int
+	RootFlag              bool
+	EnableAll             bool
 
 	EnableLink bool
 	CNI        string
@@ -93,6 +95,7 @@ func NewCmdJoin(f ctlutil.Factory) *cobra.Command {
 	flags.StringVar(&o.KubeConfig, "kubeconfig", "", "Absolute path to the cluster kubeconfig file.")
 	flags.StringVar(&o.Context, "context", "", "The name of the kubeconfig context.")
 	flags.StringVar(&o.HostKubeConfig, "host-kubeconfig", "", "Absolute path to the special host kubeconfig file.")
+	flags.StringVar(&o.InnerKubeConfig, "inner-kubeconfig", "", "Absolute path to the leaf cluster inner kubeconfig file.")
 	flags.StringVar(&o.HostContext, "host-context", "", "The name of the host-kubeconfig context.")
 	flags.StringVar(&o.ImageRegistry, "private-image-registry", utils.DefaultImageRepository, "Private image registry where pull images from. If set, all required images will be downloaded from it, it would be useful in offline installation scenarios.")
 	flags.StringVar(&o.Version, "version", "", "image version for pull images")
@@ -126,6 +129,17 @@ func (o *CommandJoinOptions) Complete(f ctlutil.Factory) error {
 		return fmt.Errorf("kosmosctl join complete error, generate Kosmos client failed: %v", err)
 	}
 
+	// init the inner kubeconfig stream
+	if len(o.InnerKubeConfig) > 0 {
+		innerRawConfig, err := utils.RawConfig(o.InnerKubeConfig, o.Context)
+		if err != nil {
+			return fmt.Errorf("kosmosctl join complete error, generate inner raw config failed: %s", err)
+		}
+		o.InnerKubeconfigStream, err = clientcmd.Write(innerRawConfig)
+		if err != nil {
+			return fmt.Errorf("kosmosctl join complete error, write inner rest config to stream failed: %s", err)
+		}
+	}
 	if len(o.KubeConfig) > 0 {
 		clusterConfig, err := utils.RestConfig(o.KubeConfig, o.Context)
 		if err != nil {
@@ -156,15 +170,15 @@ func (o *CommandJoinOptions) Complete(f ctlutil.Factory) error {
 		return fmt.Errorf("kosmosctl join complete error, arg ClusterKubeConfig is required")
 	}
 
-	//no enable-all,enable-tree,enable-link found, make 'EnableAll' with other config
+	// no enable-all, enable-tree, enable-link found, derive 'EnableAll' from other config
 	if !o.EnableAll && !o.EnableTree && !o.EnableLink {
-		//due to NetworkType or IpFamily is not empty, make EnableLink true
+		// NetworkType or IpFamily is not empty, so make EnableLink true
 		if o.NetworkType != "" || o.IpFamily != "" {
 			klog.Warning("due to NetworkType or IpFamily is not empty, make EnableLink true.")
 			o.EnableLink = true
 		}
 
-		//due to LeafModel is not empty, make EnableTree true
+		// LeafModel is not empty, so make EnableTree true
 		if o.LeafModel != "" {
 			klog.Warning("due to LeafModel is not empty, make EnableTree true.")
 			o.EnableTree = true
@@ -202,7 +216,7 @@ func (o *CommandJoinOptions) Validate(args []string) error {
 		return fmt.Errorf("kosmosctl join validate error, namespace is not valid")
 	}
 
-	//validate: at least one of [EnableAll,EnableTree,EnableLink] need true
+	// validate: at least one of [EnableAll, EnableTree, EnableLink] must be true
 	if !o.EnableAll && !o.EnableTree && !o.EnableLink {
 		return fmt.Errorf("kosmosctl join validate error, need at least one of enable-all,enable-tree,enable-link")
 	}
@@ -270,7 +284,12 @@ func (o *CommandJoinOptions) runCluster() error {
 			Name: o.Name,
 		},
 		Spec: v1alpha1.ClusterSpec{
-			Kubeconfig: o.KubeConfigStream,
+			Kubeconfig: func() []byte {
+				if len(o.InnerKubeconfigStream) != 0 {
+					return o.InnerKubeconfigStream
+				}
+				return o.KubeConfigStream
+			}(),
 			Namespace:       o.Namespace,
 			ImageRepository: o.ImageRegistry,
 			ClusterLinkOptions: &v1alpha1.ClusterLinkOptions{
@@ -435,7 +454,7 @@ func (o *CommandJoinOptions) runCluster() error {
 	}
 	klog.Info("ServiceAccount " + kosmosOperatorSA.Name + " has been created.")
 
-	// ToDo: wait for all services to be running
 
 	klog.Info("Cluster [" + o.Name + "] registration successful.")
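And the matching join-side usage, sketched from the same flow (the subcommand, cluster name, and paths are illustrative):

```bash
# The inner kubeconfig ends up in the Cluster CR's spec.kubeconfig, falling
# back to --kubeconfig when the flag is omitted.
kosmosctl join cluster --name cluster-member1 \
  --kubeconfig ./environments/cluster-member1/kubeconfig \
  --inner-kubeconfig ./environments/cluster-member1/kubeconfig-nodeIp \
  --host-kubeconfig ./environments/cluster-host/kubeconfig \
  --enable-all
```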
diff --git a/test/e2e/deploy/mysql-operator/mysqll-operator.yaml b/test/e2e/deploy/mysql-operator/mysqll-operator.yaml
index b6e696861..13c886bb0 100644
--- a/test/e2e/deploy/mysql-operator/mysqll-operator.yaml
+++ b/test/e2e/deploy/mysql-operator/mysqll-operator.yaml
@@ -20,6 +20,15 @@ spec:
         app.kubernetes.io/instance: mysql-operator
         app.kubernetes.io/name: mysql-operator
     spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: In
+                    values:
+                      - cluster-host-control-plane
       containers:
         - args:
             - --leader-election-namespace=mysql-operator
diff --git a/test/e2e/deploy/nginx/nginx.yaml b/test/e2e/deploy/nginx/nginx.yaml
index c1c13b6be..72ee4a739 100644
--- a/test/e2e/deploy/nginx/nginx.yaml
+++ b/test/e2e/deploy/nginx/nginx.yaml
@@ -23,6 +23,10 @@ spec:
           operator: "Equal"
           value: "true"
           effect: "NoSchedule"
+        - key: "test-node/e2e"
+          operator: "Equal"
+          value: "leafnode"
+          effect: "NoSchedule"
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution: