From cccb8629eea20c13c885cb3d8ce41455e1b91830 Mon Sep 17 00:00:00 2001 From: rambohe-ch Date: Sun, 24 Apr 2022 22:06:09 +0800 Subject: [PATCH] [enhancement] support local up OpenYurt on Mac machines --- .github/workflows/ci.yaml | 5 +- Makefile | 110 ++++-- cmd/yurt-node-servant/config/config.go | 78 +++++ cmd/yurt-node-servant/convert/convert.go | 38 +- cmd/yurt-node-servant/node-servant.go | 2 + .../Dockerfile.yurt-controller-manager | 14 + hack/dockerfiles/Dockerfile.yurt-node-servant | 15 + hack/dockerfiles/Dockerfile.yurt-tunnel-agent | 14 + .../dockerfiles/Dockerfile.yurt-tunnel-server | 14 + hack/dockerfiles/Dockerfile.yurthub | 14 + hack/lib/build.sh | 67 ++-- hack/lib/common.sh | 29 +- hack/lib/init.sh | 8 +- hack/lib/release-images.sh | 202 ----------- hack/lib/release-manifest.sh | 65 ---- hack/make-rules/build-e2e.sh | 38 -- hack/make-rules/build.sh | 10 +- hack/make-rules/generate_client.sh | 38 -- hack/make-rules/genyaml.sh | 11 +- .../local-up-openyurt.sh} | 54 +-- hack/make-rules/push-images.sh | 22 -- hack/make-rules/push-manifest.sh | 23 -- hack/make-rules/release-images.sh | 22 -- hack/make-rules/release-manifest.sh | 23 -- hack/{ => make-rules}/run-e2e-tests.sh | 9 +- pkg/node-servant/components/yurthub.go | 10 +- pkg/node-servant/config/control-plane.go | 143 ++++++++ pkg/node-servant/config/options.go | 68 ++++ pkg/node-servant/constant.go | 40 ++- pkg/node-servant/convert/convert.go | 43 ++- pkg/node-servant/convert/options.go | 88 ++--- pkg/node-servant/job.go | 12 +- pkg/node-servant/revert/revert.go | 4 +- pkg/preflight/checks.go | 2 +- pkg/util/file/file.go | 103 ++++++ .../constants/yurt-tunnel-agent-tmpl.go | 1 + .../constants/yurt-tunnel-server-tmpl.go | 2 + pkg/yurtadm/util/edgenode/common.go | 2 + pkg/yurtadm/util/kubernetes/util.go | 45 ++- .../cmd/yurttest/kindinit/converter.go | 63 ++-- pkg/yurtctl/cmd/yurttest/kindinit/init.go | 328 +++++++++++++++--- 41 files changed, 1128 insertions(+), 751 deletions(-) create mode 100644 cmd/yurt-node-servant/config/config.go create mode 100644 hack/dockerfiles/Dockerfile.yurt-controller-manager create mode 100644 hack/dockerfiles/Dockerfile.yurt-node-servant create mode 100644 hack/dockerfiles/Dockerfile.yurt-tunnel-agent create mode 100644 hack/dockerfiles/Dockerfile.yurt-tunnel-server create mode 100644 hack/dockerfiles/Dockerfile.yurthub delete mode 100644 hack/lib/release-images.sh delete mode 100644 hack/lib/release-manifest.sh delete mode 100755 hack/make-rules/build-e2e.sh delete mode 100755 hack/make-rules/generate_client.sh rename hack/{local_up_openyurt.sh => make-rules/local-up-openyurt.sh} (72%) delete mode 100755 hack/make-rules/push-images.sh delete mode 100644 hack/make-rules/push-manifest.sh delete mode 100755 hack/make-rules/release-images.sh delete mode 100644 hack/make-rules/release-manifest.sh rename hack/{ => make-rules}/run-e2e-tests.sh (77%) create mode 100644 pkg/node-servant/config/control-plane.go create mode 100644 pkg/node-servant/config/options.go create mode 100644 pkg/util/file/file.go diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 1d46503b547..c6bd68e9d81 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -131,8 +131,9 @@ jobs: run: | go get sigs.k8s.io/kind@v0.11.1 curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.20.7/bin/linux/amd64/kubectl && sudo install kubectl /usr/local/bin/kubectl + - name: Build Images + run: make docker-build - name: Local Up Openyurt Cluster With Kind - run: bash hack/local_up_openyurt.sh -
+ run: make local-up-openyurt - name: Run e2e Tests run: make e2e-tests diff --git a/Makefile b/Makefile index 84896ff1087..49b0bcceb30 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -.PHONY: clean all release build +TARGET_PLATFORMS ?= linux/amd64 +IMAGE_REPO ?= openyurt +IMAGE_TAG ?= $(shell git describe --abbrev=0 --tags) +GIT_COMMIT = $(shell git rev-parse HEAD) + +ifeq ($(shell git tag --points-at ${GIT_COMMIT}),) +GIT_VERSION=$(IMAGE_TAG)-$(shell echo ${GIT_COMMIT} | cut -c 1-7) +else +GIT_VERSION=$(IMAGE_TAG) +endif + +DOCKER_BUILD_ARGS = --build-arg GIT_VERSION=${GIT_VERSION} + +ifeq (${REGION}, cn) +DOCKER_BUILD_ARGS += --build-arg GOPROXY=https://goproxy.cn --build-arg MIRROR_REPO=mirrors.aliyun.com +endif + +.PHONY: clean all build all: test build @@ -37,46 +54,25 @@ fmt: vet: go vet ./pkg/... ./cmd/... -# Build binaries and docker images. -# NOTE: this rule can take time, as we build binaries inside containers -# -# ARGS: -# WHAT: list of components that will be compiled. -# ARCH: list of target architectures. -# REGION: in which region this rule is executed, if in mainland China, -# set it as cn. -# -# Examples: -# # compile yurthub, yurt-controller-manager and yurtctl-servant with -# # architectures arm64 and arm in the mainland China -# make release WHAT="yurthub yurt-controller-manager yurtctl-servant" ARCH="arm64 arm" REGION=cn -# -# # compile all components with all architectures (i.e., amd64, arm64, arm) -# make release -release: - bash hack/make-rules/release-images.sh - -# push generated images during 'make release' -push: - bash hack/make-rules/push-images.sh - clean: -rm -Rf _output - -rm -Rf dockerbuild - -e2e: - hack/make-rules/build-e2e.sh -e2e-tests: - bash hack/run-e2e-tests.sh +# Start up an OpenYurt cluster on the local machine based on a Kind cluster +# You can run this rule on different environments by specifying TARGET_PLATFORMS; the default platform is linux/amd64 +# - on a CentOS host: make local-up-openyurt +# - on a MacBook Pro M1: make local-up-openyurt TARGET_PLATFORMS=linux/arm64 +local-up-openyurt: + YURT_VERSION=$(GIT_VERSION) bash hack/make-rules/local-up-openyurt.sh -# create multi-arch manifest -manifest: - bash hack/make-rules/release-manifest.sh +# Build all OpenYurt component images and then start up an OpenYurt cluster on the local machine based on a Kind cluster +# You can run this rule on different environments by specifying TARGET_PLATFORMS; the default platform is linux/amd64 +# - on a CentOS host: make docker-build-and-up-openyurt +# - on a MacBook Pro M1: make docker-build-and-up-openyurt TARGET_PLATFORMS=linux/arm64 +docker-build-and-up-openyurt: docker-build + YURT_VERSION=$(GIT_VERSION) bash hack/make-rules/local-up-openyurt.sh -# push generated manifest during 'make manifest' -push_manifest: - bash hack/make-rules/push-manifest.sh +e2e-tests: + bash hack/make-rules/run-e2e-tests.sh install-golint: ## check golint if not exist install golint tools ifeq (, $(shell which golangci-lint)) @@ -90,4 +86,44 @@ GOLINT_BIN=$(shell which golangci-lint) endif lint: install-golint ## Run go lint against code.
- $(GOLINT_BIN) run -v \ No newline at end of file + $(GOLINT_BIN) run -v + +# Build the docker images for a single arch (specify the arch via the TARGET_PLATFORMS env) +# - build linux/amd64 docker images: +# $# make docker-build TARGET_PLATFORMS=linux/amd64 +# - build linux/arm64 docker images: +# $# make docker-build TARGET_PLATFORMS=linux/arm64 +docker-build: docker-build-yurthub docker-build-yurt-controller-manager docker-build-yurt-tunnel-server docker-build-yurt-tunnel-agent docker-build-node-servant + +docker-build-yurthub: + docker buildx build --no-cache --load ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurthub . -t ${IMAGE_REPO}/yurthub:${GIT_VERSION} + +docker-build-yurt-controller-manager: + docker buildx build --no-cache --load ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-controller-manager . -t ${IMAGE_REPO}/yurt-controller-manager:${GIT_VERSION} + +docker-build-yurt-tunnel-server: + docker buildx build --no-cache --load ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-tunnel-server . -t ${IMAGE_REPO}/yurt-tunnel-server:${GIT_VERSION} + +docker-build-yurt-tunnel-agent: + docker buildx build --no-cache --load ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-tunnel-agent . -t ${IMAGE_REPO}/yurt-tunnel-agent:${GIT_VERSION} + +docker-build-node-servant: + docker buildx build --no-cache --load ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-node-servant . -t ${IMAGE_REPO}/node-servant:${GIT_VERSION} + +# Build and push the multi-arch docker images +docker-push: docker-push-yurthub docker-push-yurt-controller-manager docker-push-yurt-tunnel-server docker-push-yurt-tunnel-agent docker-push-node-servant + +docker-push-yurthub: + docker buildx build --no-cache --push ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurthub . -t ${IMAGE_REPO}/yurthub:${GIT_VERSION} + +docker-push-yurt-controller-manager: + docker buildx build --no-cache --push ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-controller-manager . -t ${IMAGE_REPO}/yurt-controller-manager:${GIT_VERSION} + +docker-push-yurt-tunnel-server: + docker buildx build --no-cache --push ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-tunnel-server . -t ${IMAGE_REPO}/yurt-tunnel-server:${GIT_VERSION} + +docker-push-yurt-tunnel-agent: + docker buildx build --no-cache --push ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-tunnel-agent . -t ${IMAGE_REPO}/yurt-tunnel-agent:${GIT_VERSION} + +docker-push-node-servant: + docker buildx build --no-cache --push ${DOCKER_BUILD_ARGS} --platform ${TARGET_PLATFORMS} -f hack/dockerfiles/Dockerfile.yurt-node-servant . -t ${IMAGE_REPO}/node-servant:${GIT_VERSION} diff --git a/cmd/yurt-node-servant/config/config.go b/cmd/yurt-node-servant/config/config.go new file mode 100644 index 00000000000..dd87927c2a9 --- /dev/null +++ b/cmd/yurt-node-servant/config/config.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + "github.com/openyurtio/openyurt/pkg/node-servant/config" + "github.com/openyurtio/openyurt/pkg/projectinfo" +) + +// NewConfigCmd generates a new config command +func NewConfigCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "config", + Short: "manage configuration of OpenYurt cluster", + RunE: cobra.OnlyValidArgs, + ValidArgs: []string{"control-plane"}, + Args: cobra.MaximumNArgs(1), + } + cmd.AddCommand(newCmdConfigControlPlane()) + + return cmd +} + +func newCmdConfigControlPlane() *cobra.Command { + o := config.NewControlPlaneOptions() + cmd := &cobra.Command{ + Use: "control-plane", + Short: "configure control-plane components like kube-apiserver and kube-controller-manager", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Printf("node-servant version: %#v\n", projectinfo.Get()) + if o.Version { + return nil + } + + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + klog.Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) + + if err := o.Validate(); err != nil { + klog.Fatalf("validate options: %v", err) + } + + runner, err := config.NewControlPlaneRunner(o) + if err != nil { + return err + } + if err := runner.Do(); err != nil { + return fmt.Errorf("failed to config control-plane, %v", err) + } + + klog.Info("node-servant config control-plane success") + return nil + }, + Args: cobra.NoArgs, + } + o.AddFlags(cmd.Flags()) + return cmd +} diff --git a/cmd/yurt-node-servant/convert/convert.go b/cmd/yurt-node-servant/convert/convert.go index eafc2e3e53b..e732f3267f1 100644 --- a/cmd/yurt-node-servant/convert/convert.go +++ b/cmd/yurt-node-servant/convert/convert.go @@ -17,17 +17,14 @@ limitations under the License. package convert import ( - "time" + "fmt" "github.com/spf13/cobra" + "github.com/spf13/pflag" "k8s.io/klog/v2" nodeconverter "github.com/openyurtio/openyurt/pkg/node-servant/convert" -) - -const ( - // defaultYurthubHealthCheckTimeout defines the default timeout for yurthub health check phase - defaultYurthubHealthCheckTimeout = 2 * time.Minute + "github.com/openyurtio/openyurt/pkg/projectinfo" ) // NewConvertCmd generates a new convert command @@ -37,8 +34,17 @@ func NewConvertCmd() *cobra.Command { Use: "convert --working-mode", Short: "", Run: func(cmd *cobra.Command, args []string) { - if err := o.Complete(cmd.Flags()); err != nil { - klog.Fatalf("fail to complete the convert option: %s", err) + fmt.Printf("node-servant version: %#v\n", projectinfo.Get()) + if o.Version { + return + } + + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + klog.Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) + + if err := o.Validate(); err != nil { + klog.Fatalf("validate options: %v", err) } converter := nodeconverter.NewConverterWithOptions(o) @@ -49,21 +55,7 @@ func NewConvertCmd() *cobra.Command { }, Args: cobra.NoArgs, } - setFlags(cmd) + o.AddFlags(cmd.Flags()) return cmd } - -// setFlags sets flags. 
-func setFlags(cmd *cobra.Command) { - cmd.Flags().String("yurthub-image", "openyurt/yurthub:latest", - "The yurthub image.") - cmd.Flags().Duration("yurthub-healthcheck-timeout", defaultYurthubHealthCheckTimeout, - "The timeout for yurthub health check.") - cmd.Flags().StringP("kubeadm-conf-path", "k", "", - "The path to kubelet service conf that is used by kubelet component to join the cluster on the work node."+ - "Support multiple values, will search in order until get the file.(e.g -k kbcfg1,kbcfg2)", - ) - cmd.Flags().String("join-token", "", "The token used by yurthub for joining the cluster.") - cmd.Flags().String("working-mode", "edge", "The node type cloud/edge, effect yurthub workingMode.") -} diff --git a/cmd/yurt-node-servant/node-servant.go b/cmd/yurt-node-servant/node-servant.go index 5b8313cd84c..f193ef9747b 100644 --- a/cmd/yurt-node-servant/node-servant.go +++ b/cmd/yurt-node-servant/node-servant.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" + "github.com/openyurtio/openyurt/cmd/yurt-node-servant/config" "github.com/openyurtio/openyurt/cmd/yurt-node-servant/convert" preflightconvert "github.com/openyurtio/openyurt/cmd/yurt-node-servant/preflight-convert" "github.com/openyurtio/openyurt/cmd/yurt-node-servant/revert" @@ -46,6 +47,7 @@ func main() { rootCmd.AddCommand(convert.NewConvertCmd()) rootCmd.AddCommand(revert.NewRevertCmd()) rootCmd.AddCommand(preflightconvert.NewxPreflightConvertCmd()) + rootCmd.AddCommand(config.NewConfigCmd()) if err := rootCmd.Execute(); err != nil { // run command os.Exit(1) diff --git a/hack/dockerfiles/Dockerfile.yurt-controller-manager b/hack/dockerfiles/Dockerfile.yurt-controller-manager new file mode 100644 index 00000000000..bd4d8d8d461 --- /dev/null +++ b/hack/dockerfiles/Dockerfile.yurt-controller-manager @@ -0,0 +1,14 @@ +# multi-arch image building for yurt-controller-manager + +FROM --platform=${BUILDPLATFORM} golang:1.17.1 as builder +ADD . /build +ARG TARGETOS TARGETARCH GIT_VERSION GOPROXY MIRROR_REPO +WORKDIR /build/ +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GIT_VERSION=${GIT_VERSION} make build WHAT=cmd/yurt-controller-manager + +FROM --platform=${TARGETPLATFORM} alpine:3.14 +ARG TARGETOS TARGETARCH MIRROR_REPO +RUN if [ ! -z "${MIRROR_REPO+x}" ]; then sed -i "s/dl-cdn.alpinelinux.org/${MIRROR_REPO}/g" /etc/apk/repositories; fi && \ + apk add ca-certificates bash libc6-compat && update-ca-certificates && rm /var/cache/apk/* +COPY --from=builder /build/_output/local/bin/${TARGETOS}/${TARGETARCH}/yurt-controller-manager /usr/local/bin/yurt-controller-manager +ENTRYPOINT ["/usr/local/bin/yurt-controller-manager"] \ No newline at end of file diff --git a/hack/dockerfiles/Dockerfile.yurt-node-servant b/hack/dockerfiles/Dockerfile.yurt-node-servant new file mode 100644 index 00000000000..a563597cd6f --- /dev/null +++ b/hack/dockerfiles/Dockerfile.yurt-node-servant @@ -0,0 +1,15 @@ +# multi-arch image building for yurt-node-servant + +FROM --platform=${BUILDPLATFORM} golang:1.17.1 as builder +ADD . /build +ARG TARGETOS TARGETARCH GIT_VERSION GOPROXY MIRROR_REPO +WORKDIR /build/ +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GIT_VERSION=${GIT_VERSION} make build WHAT=cmd/yurt-node-servant + +FROM --platform=${TARGETPLATFORM} alpine:3.14 +ARG TARGETOS TARGETARCH MIRROR_REPO +RUN if [ ! 
-z "${MIRROR_REPO+x}" ]; then sed -i "s/dl-cdn.alpinelinux.org/${MIRROR_REPO}/g" /etc/apk/repositories; fi && \ + apk add ca-certificates bash libc6-compat && update-ca-certificates && rm /var/cache/apk/* +COPY --from=builder /build/_output/local/bin/${TARGETOS}/${TARGETARCH}/yurt-node-servant /usr/local/bin/node-servant +COPY hack/lib/node-servant-entry.sh /usr/local/bin/entry.sh +RUN chmod +x /usr/local/bin/entry.sh \ No newline at end of file diff --git a/hack/dockerfiles/Dockerfile.yurt-tunnel-agent b/hack/dockerfiles/Dockerfile.yurt-tunnel-agent new file mode 100644 index 00000000000..f6bf70b616c --- /dev/null +++ b/hack/dockerfiles/Dockerfile.yurt-tunnel-agent @@ -0,0 +1,14 @@ +# multi-arch image building for yurt-tunnel-agent + +FROM --platform=${BUILDPLATFORM} golang:1.17.1 as builder +ADD . /build +ARG TARGETOS TARGETARCH GIT_VERSION GOPROXY MIRROR_REPO +WORKDIR /build/ +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GIT_VERSION=${GIT_VERSION} make build WHAT=cmd/yurt-tunnel-agent + +FROM --platform=${TARGETPLATFORM} alpine:3.14 +ARG TARGETOS TARGETARCH MIRROR_REPO +RUN if [ ! -z "${MIRROR_REPO+x}" ]; then sed -i "s/dl-cdn.alpinelinux.org/${MIRROR_REPO}/g" /etc/apk/repositories; fi && \ + apk add ca-certificates bash libc6-compat && update-ca-certificates && rm /var/cache/apk/* +COPY --from=builder /build/_output/local/bin/${TARGETOS}/${TARGETARCH}/yurt-tunnel-agent /usr/local/bin/yurt-tunnel-agent +ENTRYPOINT ["/usr/local/bin/yurt-tunnel-agent"] \ No newline at end of file diff --git a/hack/dockerfiles/Dockerfile.yurt-tunnel-server b/hack/dockerfiles/Dockerfile.yurt-tunnel-server new file mode 100644 index 00000000000..7c685e0d701 --- /dev/null +++ b/hack/dockerfiles/Dockerfile.yurt-tunnel-server @@ -0,0 +1,14 @@ +# multi-arch image building for yurt-tunnel-server + +FROM --platform=${BUILDPLATFORM} golang:1.17.1 as builder +ADD . /build +ARG TARGETOS TARGETARCH GIT_VERSION GOPROXY MIRROR_REPO +WORKDIR /build/ +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GIT_VERSION=${GIT_VERSION} make build WHAT=cmd/yurt-tunnel-server + +FROM --platform=${TARGETPLATFORM} alpine:3.14 +ARG TARGETOS TARGETARCH MIRROR_REPO +RUN if [ ! -z "${MIRROR_REPO+x}" ]; then sed -i "s/dl-cdn.alpinelinux.org/${MIRROR_REPO}/g" /etc/apk/repositories; fi && \ + apk add ca-certificates bash libc6-compat iptables ip6tables && update-ca-certificates && rm /var/cache/apk/* +COPY --from=builder /build/_output/local/bin/${TARGETOS}/${TARGETARCH}/yurt-tunnel-server /usr/local/bin/yurt-tunnel-server +ENTRYPOINT ["/usr/local/bin/yurt-tunnel-server"] \ No newline at end of file diff --git a/hack/dockerfiles/Dockerfile.yurthub b/hack/dockerfiles/Dockerfile.yurthub new file mode 100644 index 00000000000..6b0b099459d --- /dev/null +++ b/hack/dockerfiles/Dockerfile.yurthub @@ -0,0 +1,14 @@ +# multi-arch image building for yurthub + +FROM --platform=${BUILDPLATFORM} golang:1.17.1 as builder +ADD . /build +ARG TARGETOS TARGETARCH GIT_VERSION GOPROXY MIRROR_REPO +WORKDIR /build/ +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GIT_VERSION=${GIT_VERSION} make build WHAT=cmd/yurthub + +FROM --platform=${TARGETPLATFORM} alpine:3.14 +ARG TARGETOS TARGETARCH MIRROR_REPO +RUN if [ ! 
-z "${MIRROR_REPO+x}" ]; then sed -i "s/dl-cdn.alpinelinux.org/${MIRROR_REPO}/g" /etc/apk/repositories; fi && \ + apk add ca-certificates bash libc6-compat iptables ip6tables && update-ca-certificates && rm /var/cache/apk/* +COPY --from=builder /build/_output/local/bin/${TARGETOS}/${TARGETARCH}/yurthub /usr/local/bin/yurthub +ENTRYPOINT ["/usr/local/bin/yurthub"] \ No newline at end of file diff --git a/hack/lib/build.sh b/hack/lib/build.sh index 01f2231afdf..e7a0d87fabe 100644 --- a/hack/lib/build.sh +++ b/hack/lib/build.sh @@ -16,47 +16,6 @@ set -x -readonly YURT_ALL_TARGETS=( - yurtadm - yurtctl - yurt-node-servant - yurthub - yurt-controller-manager - yurt-tunnel-server - yurt-tunnel-agent -) - -# we will generates setup yaml files for following components -readonly YURT_YAML_TARGETS=( - yurthub - yurt-controller-manager - yurt-tunnel-server - yurt-tunnel-agent -) - -#PROJECT_PREFIX=${PROJECT_PREFIX:-yurt} -#LABEL_PREFIX=${LABEL_PREFIX:-openyurt.io} -#GIT_VERSION="v0.1.1" -#GIT_COMMIT=$(git rev-parse HEAD) -#BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - -# project_info generates the project information and the corresponding value -# for 'ldflags -X' option -project_info() { - PROJECT_INFO_PKG=${YURT_MOD}/pkg/projectinfo - echo "-X ${PROJECT_INFO_PKG}.projectPrefix=${PROJECT_PREFIX}" - echo "-X ${PROJECT_INFO_PKG}.labelPrefix=${LABEL_PREFIX}" - echo "-X ${PROJECT_INFO_PKG}.gitVersion=${GIT_VERSION}" - echo "-X ${PROJECT_INFO_PKG}.gitCommit=${GIT_COMMIT}" - echo "-X ${PROJECT_INFO_PKG}.buildDate=${BUILD_DATE}" -} - -# get_binary_dir_with_arch generated the binary's directory with GOOS and GOARCH. -# eg: ./_output/bin/darwin/arm64/ -get_binary_dir_with_arch(){ - echo $1/$(go env GOOS)/$(go env GOARCH)/ -} - build_binaries() { local goflags goldflags gcflags goldflags="${GOLDFLAGS:--s -w $(project_info)}" @@ -80,6 +39,7 @@ build_binaries() { fi local target_bin_dir=$(get_binary_dir_with_arch ${YURT_LOCAL_BIN_DIR}) + rm -rf ${target_bin_dir} mkdir -p ${target_bin_dir} cd ${target_bin_dir} for binary in "${targets[@]}"; do @@ -89,9 +49,13 @@ build_binaries() { -gcflags "${gcflags:-}" ${goflags} $YURT_ROOT/cmd/$(canonicalize_target $binary) done - if [[ $(host_platform) == ${HOST_PLATFORM} ]]; then - rm -f "${YURT_BIN_DIR}" - ln -s "${target_bin_dir}" "${YURT_BIN_DIR}" + local yurtctl_binary=$(get_output_name yurtctl) + if is_build_on_host; then + if [ -f ${target_bin_dir}/${yurtctl_binary} ]; then + rm -rf "${YURT_BIN_DIR}" + mkdir -p "${YURT_BIN_DIR}" + ln -s "${target_bin_dir}/${yurtctl_binary}" "${YURT_BIN_DIR}/${yurtctl_binary}" + fi fi } @@ -127,3 +91,18 @@ gen_yamls() { $yaml_dir/$oup_file.yaml done } + +function build_e2e() { + local goflags goldflags gcflags + goldflags="${GOLDFLAGS:--s -w $(project_info)}" + gcflags="${GOGCFLAGS:-}" + goflags=${GOFLAGS:-} + + local target_bin_dir=$(get_binary_dir_with_arch ${YURT_LOCAL_BIN_DIR}) + mkdir -p ${target_bin_dir} + cd ${target_bin_dir} + echo "Building ${YURT_E2E_TARGETS}" + local testpkg="$(dirname ${YURT_E2E_TARGETS})" + local filename="$(basename ${YURT_E2E_TARGETS})" + go test -c -gcflags "${gcflags:-}" ${goflags} -o $filename "$YURT_ROOT/${testpkg}" +} diff --git a/hack/lib/common.sh b/hack/lib/common.sh index f5c0281f7c3..b47cc216db3 100644 --- a/hack/lib/common.sh +++ b/hack/lib/common.sh @@ -44,9 +44,15 @@ canonicalize_target() { echo $target } -# host_platform returns the host platform determined by golang -host_platform() { - echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" +# is_build_on_host is used to verify binary build on 
host or not +is_build_on_host() { + if [[ "$(go env GOOS)" == "$(go env GOHOSTOS)" && "$(go env GOARCH)" == "$(go env GOHOSTARCH)" ]]; then + # build binary on the host + return 0 + else + # do not build binary on the host + return 1 + fi } # Parameters @@ -64,3 +70,20 @@ get_component_name() { fi echo $yurt_component_name } + +# project_info generates the project information and the corresponding value +# for 'ldflags -X' option +project_info() { + PROJECT_INFO_PKG=${YURT_MOD}/pkg/projectinfo + echo "-X ${PROJECT_INFO_PKG}.projectPrefix=${PROJECT_PREFIX}" + echo "-X ${PROJECT_INFO_PKG}.labelPrefix=${LABEL_PREFIX}" + echo "-X ${PROJECT_INFO_PKG}.gitVersion=${GIT_VERSION}" + echo "-X ${PROJECT_INFO_PKG}.gitCommit=${GIT_COMMIT}" + echo "-X ${PROJECT_INFO_PKG}.buildDate=${BUILD_DATE}" +} + +# get_binary_dir_with_arch generated the binary's directory with GOOS and GOARCH. +# eg: ./_output/bin/darwin/arm64/ +get_binary_dir_with_arch(){ + echo $1/$(go env GOOS)/$(go env GOARCH) +} \ No newline at end of file diff --git a/hack/lib/init.sh b/hack/lib/init.sh index 44c9dbf23e1..bb15af2a65b 100644 --- a/hack/lib/init.sh +++ b/hack/lib/init.sh @@ -18,7 +18,6 @@ set -o errexit set -o nounset set -o pipefail -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" YURT_MOD="$(head -1 $YURT_ROOT/go.mod | awk '{print $2}')" YURT_OUTPUT_DIR=${YURT_ROOT}/_output YURT_BIN_DIR=${YURT_OUTPUT_DIR}/bin @@ -29,10 +28,5 @@ LABEL_PREFIX=${LABEL_PREFIX:-openyurt.io} GIT_VERSION=${GIT_VERSION:-$(git describe --abbrev=0 --tags)} GIT_COMMIT=$(git rev-parse HEAD) BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') -REPO=${REPO:-openyurt} -TAG=$GIT_VERSION -source "${YURT_ROOT}/hack/lib/common.sh" -source "${YURT_ROOT}/hack/lib/build.sh" -source "${YURT_ROOT}/hack/lib/release-images.sh" -source "${YURT_ROOT}/hack/lib/release-manifest.sh" +source "${YURT_ROOT}/hack/lib/common.sh" \ No newline at end of file diff --git a/hack/lib/release-images.sh b/hack/lib/release-images.sh deleted file mode 100644 index 111530493c9..00000000000 --- a/hack/lib/release-images.sh +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -x - -YURT_IMAGE_DIR=${YURT_OUTPUT_DIR}/images -YURTCTL_SERVANT_DIR=${YURT_ROOT}/config/yurtctl-servant -DOCKER_BUILD_BASE_DIR=$YURT_ROOT/dockerbuild -YURT_BUILD_IMAGE="golang:1.16-alpine" -#REPO="openyurt" -#TAG="v0.2.0" - -readonly -a YURT_BIN_TARGETS=( - yurthub - yurt-controller-manager - yurtctl - yurt-node-servant - yurt-tunnel-server - yurt-tunnel-agent -) - -readonly -a SUPPORTED_ARCH=( - amd64 - arm - arm64 -) - -readonly SUPPORTED_OS=linux - -readonly -a bin_targets=(${WHAT[@]:-${YURT_BIN_TARGETS[@]}}) -readonly -a bin_targets_process_servant=("${bin_targets[@]/yurtctl-servant/yurtctl}") -readonly -a target_arch=(${ARCH[@]:-${SUPPORTED_ARCH[@]}}) -readonly region=${REGION:-us} - -# Parameters -# $1: component name -# $2: arch -function get_image_name { - tag=$(get_version $2) - echo "${REPO}/$1:${tag}" -} - -# Parameters -# $1: arch -# The format is like: -# "v0.6.0-amd64-a955ecc" if the HEAD is not at a tag, -# "v0.6.0-amd64" otherwise. -function get_version { - # If ${GIT_COMMIT} does not point at a tag, add commit suffix to the image tag. - if [[ -z $(git tag --points-at ${GIT_COMMIT}) ]]; then - tag="${TAG}-$1-$(echo ${GIT_COMMIT} | cut -c 1-7)" - else - tag="${TAG}-$1" - fi - - echo "${tag}" -} - - -function build_multi_arch_binaries() { - local docker_yurt_root="/opt/src" - local docker_run_opts=( - "-i" - "--rm" - "--network host" - "-v ${YURT_ROOT}:${docker_yurt_root}" - "--env CGO_ENABLED=0" - "--env GOOS=${SUPPORTED_OS}" - "--env PROJECT_PREFIX=${PROJECT_PREFIX}" - "--env LABEL_PREFIX=${LABEL_PREFIX}" - "--env GIT_VERSION=${GIT_VERSION}" - "--env GIT_COMMIT=${GIT_COMMIT}" - "--env BUILD_DATE=${BUILD_DATE}" - "--env HOST_PLATFORM=$(host_platform)" - ) - # use goproxy if build from inside mainland China - [[ $region == "cn" ]] && docker_run_opts+=("--env GOPROXY=https://goproxy.cn") - - # use proxy if set - [[ -n ${http_proxy+x} ]] && docker_run_opts+=("--env http_proxy=${http_proxy}") - [[ -n ${https_proxy+x} ]] && docker_run_opts+=("--env https_proxy=${https_proxy}") - - local docker_run_cmd=( - "/bin/sh" - "-xe" - "-c" - ) - - local sub_commands="sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories; \ - apk --no-cache add bash git; \ - cd ${docker_yurt_root}; umask 0022; \ - rm -rf ${YURT_LOCAL_BIN_DIR}/* ; \ - git config --global --add safe.directory ${docker_yurt_root};" - for arch in ${target_arch[@]}; do - sub_commands+="GOARCH=$arch bash ./hack/make-rules/build.sh $(echo ${bin_targets_process_servant[@]}); " - done - sub_commands+="chown -R $(id -u):$(id -g) ${docker_yurt_root}/_output" - - docker run ${docker_run_opts[@]} ${YURT_BUILD_IMAGE} ${docker_run_cmd[@]} "${sub_commands}" -} - -function build_docker_image() { - for arch in ${target_arch[@]}; do - for binary in "${bin_targets_process_servant[@]}"; do - local binary_name=$(get_output_name $binary) - local binary_path=${YURT_LOCAL_BIN_DIR}/${SUPPORTED_OS}/${arch}/${binary_name} - if [ -f ${binary_path} ]; then - local docker_build_path=${DOCKER_BUILD_BASE_DIR}/${SUPPORTED_OS}/${arch} - local docker_file_path=${docker_build_path}/Dockerfile.${binary_name}-${arch} - mkdir -p ${docker_build_path} - local yurt_component_name=$(get_component_name $binary_name) - local base_image - if [[ ${binary} =~ yurtctl ]] - then - case $arch in - amd64) - base_image="amd64/alpine:3.9" - ;; - arm64) - base_image="arm64v8/alpine:3.9" - ;; - arm) - base_image="arm32v7/alpine:3.9" - ;; - *) - echo unknown arch $arch - exit 1 - esac - cat << EOF > $docker_file_path -FROM ${base_image} -ADD 
${binary_name} /usr/local/bin/yurtctl -EOF - elif [[ ${binary} =~ yurt-node-servant ]]; - then - case $arch in - amd64) - base_image="amd64/alpine:3.9" - ;; - arm64) - base_image="arm64v8/alpine:3.9" - ;; - arm) - base_image="arm32v7/alpine:3.9" - ;; - *) - echo unknown arch $arch - exit 1 - esac - ln ./hack/lib/node-servant-entry.sh "${docker_build_path}/entry.sh" - cat << EOF > $docker_file_path -FROM ${base_image} -ADD entry.sh /usr/local/bin/entry.sh -RUN chmod +x /usr/local/bin/entry.sh -ADD ${binary_name} /usr/local/bin/node-servant -EOF - else - base_image="k8s.gcr.io/debian-iptables-${arch}:v11.0.2" - cat < "${docker_file_path}" -FROM ${base_image} -COPY ${binary_name} /usr/local/bin/${binary_name} -ENTRYPOINT ["/usr/local/bin/${binary_name}"] -EOF - fi - - yurt_component_image=$(get_image_name ${yurt_component_name} ${arch}) - ln "${binary_path}" "${docker_build_path}/${binary_name}" - docker build --no-cache -t "${yurt_component_image}" -f "${docker_file_path}" ${docker_build_path} - echo ${yurt_component_image} >> ${DOCKER_BUILD_BASE_DIR}/images.list - docker save ${yurt_component_image} > ${YURT_IMAGE_DIR}/${yurt_component_name}-${SUPPORTED_OS}-${arch}.tar - fi - done - done -} - -build_images() { - # Always clean first - rm -Rf ${YURT_OUTPUT_DIR} - rm -Rf ${DOCKER_BUILD_BASE_DIR} - mkdir -p ${YURT_LOCAL_BIN_DIR} - mkdir -p ${YURT_IMAGE_DIR} - mkdir -p ${DOCKER_BUILD_BASE_DIR} - - build_multi_arch_binaries - build_docker_image -} - -push_images() { - cat ${DOCKER_BUILD_BASE_DIR}/images.list | xargs -I % sh -c 'echo pushing %; docker push %; echo' -} diff --git a/hack/lib/release-manifest.sh b/hack/lib/release-manifest.sh deleted file mode 100644 index 263353a71ac..00000000000 --- a/hack/lib/release-manifest.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -x - - -# Parameters -# $1: component name -function get_manifest_name() { - # If ${GIT_COMMIT} is not at a tag, add commit to the image tag. - if [[ -z $(git tag --points-at ${GIT_COMMIT}) ]]; then - yurt_component_manifest="${REPO}/$1:${TAG}-$(echo ${GIT_COMMIT} | cut -c 1-7)" - else - yurt_component_manifest="${REPO}/$1:${TAG}" - fi - echo ${yurt_component_manifest} -} - -function build_docker_manifest() { - # Always clean first - rm -Rf ${DOCKER_BUILD_BASE_DIR} - mkdir -p ${DOCKER_BUILD_BASE_DIR} - - for binary in "${bin_targets_process_servant[@]}"; do - local binary_name=$(get_output_name $binary) - local yurt_component_name=$(get_component_name $binary_name) - local yurt_component_manifest=$(get_manifest_name $yurt_component_name) - echo ${yurt_component_manifest} >> ${DOCKER_BUILD_BASE_DIR}/manifest.list - # Remove existing manifest. 
- docker manifest rm ${yurt_component_manifest} || true - for arch in ${target_arch[@]}; do - case $arch in - amd64) - ;; - arm64) - ;; - arm) - ;; - *) - echo unknown arch $arch - exit 1 - esac - yurt_component_image=$(get_image_name ${yurt_component_name} ${arch}) - docker manifest create ${yurt_component_manifest} --amend ${yurt_component_image} - docker manifest annotate ${yurt_component_manifest} ${yurt_component_image} --os ${SUPPORTED_OS} --arch ${arch} - done - done -} - -push_manifest() { - cat ${DOCKER_BUILD_BASE_DIR}/manifest.list | xargs -I % sh -c 'echo pushing manifest %; docker manifest push --purge %; echo' -} diff --git a/hack/make-rules/build-e2e.sh b/hack/make-rules/build-e2e.sh deleted file mode 100755 index 521818e715e..00000000000 --- a/hack/make-rules/build-e2e.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" -source "${YURT_ROOT}/hack/lib/init.sh" - -readonly YURT_E2E_TARGETS="test/e2e/yurt-e2e-test" - -function build_e2e() { - local goflags goldflags gcflags - goldflags="${GOLDFLAGS:--s -w $(project_info)}" - gcflags="${GOGCFLAGS:-}" - goflags=${GOFLAGS:-} - - - local target_bin_dir=$(get_binary_dir_with_arch ${YURT_LOCAL_BIN_DIR}) - mkdir -p ${target_bin_dir} - cd ${target_bin_dir} - echo "Building ${YURT_E2E_TARGETS}" - local testpkg="$(dirname ${YURT_E2E_TARGETS})" - local filename="$(basename ${YURT_E2E_TARGETS})" - go test -c -gcflags "${gcflags:-}" ${goflags} -o $filename "$YURT_ROOT/${testpkg}" -} - -build_e2e diff --git a/hack/make-rules/build.sh b/hack/make-rules/build.sh index 1a895e68030..62b1f36fff8 100755 --- a/hack/make-rules/build.sh +++ b/hack/make-rules/build.sh @@ -18,7 +18,15 @@ set -x YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" source "${YURT_ROOT}/hack/lib/init.sh" +source "${YURT_ROOT}/hack/lib/build.sh" -HOST_PLATFORM=${HOST_PLATFORM:-"$(go env GOOS)/$(go env GOARCH)"} +readonly YURT_ALL_TARGETS=( + yurtctl + yurt-node-servant + yurthub + yurt-controller-manager + yurt-tunnel-server + yurt-tunnel-agent +) build_binaries "$@" diff --git a/hack/make-rules/generate_client.sh b/hack/make-rules/generate_client.sh deleted file mode 100755 index 6527a32fb44..00000000000 --- a/hack/make-rules/generate_client.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -x -set -e - -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" - -TMP_DIR=$(mktemp -d) -mkdir -p "${TMP_DIR}"/src/github.com/openyurtio/openyurt/pkg/yurtappmanager/client -cp -r ${YURT_ROOT}/{go.mod,go.sum} "${TMP_DIR}"/src/github.com/openyurtio/openyurt/ -cp -r ${YURT_ROOT}/pkg/yurtappmanager/{apis,hack} "${TMP_DIR}"/src/github.com/openyurtio/openyurt/pkg/yurtappmanager/ - -( - cd "${TMP_DIR}"/src/github.com/openyurtio/openyurt/; - HOLD_GO="${TMP_DIR}/src/github.com/openyurtio/openyurt/pkg/yurtappmanager/hack/hold.go" - printf 'package hack\nimport "k8s.io/code-generator"\n' > ${HOLD_GO} - go mod vendor - GOPATH=${TMP_DIR} GO111MODULE=off /bin/bash vendor/k8s.io/code-generator/generate-groups.sh all \ - github.com/openyurtio/openyurt/pkg/yurtappmanager/client github.com/openyurtio/openyurt/pkg/yurtappmanager/apis apps:v1alpha1 -h ./pkg/yurtappmanager/hack/boilerplate.go.txt -) - -rm -rf ./pkg/yurtappmanager/client/{clientset,informers,listers} -mv "${TMP_DIR}"/src/github.com/openyurtio/openyurt/pkg/yurtappmanager/client/* ./pkg/yurtappmanager/client - diff --git a/hack/make-rules/genyaml.sh b/hack/make-rules/genyaml.sh index 790535e095b..7c06f86209f 100755 --- a/hack/make-rules/genyaml.sh +++ b/hack/make-rules/genyaml.sh @@ -15,6 +15,15 @@ # limitations under the License. YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" -source "${YURT_ROOT}/hack/lib/init.sh" +source "${YURT_ROOT}/hack/lib/init.sh" +source "${YURT_ROOT}/hack/lib/build.sh" + +# we will generates setup yaml files for following components +readonly YURT_YAML_TARGETS=( + yurthub + yurt-controller-manager + yurt-tunnel-server + yurt-tunnel-agent +) gen_yamls "$@" diff --git a/hack/local_up_openyurt.sh b/hack/make-rules/local-up-openyurt.sh similarity index 72% rename from hack/local_up_openyurt.sh rename to hack/make-rules/local-up-openyurt.sh index 3275acf9648..c895438d86c 100755 --- a/hack/local_up_openyurt.sh +++ b/hack/make-rules/local-up-openyurt.sh @@ -19,10 +19,6 @@ # automatically deployed, and the autonomous mode will be active. # # It uses the following env variables: -# REGION -# REGION affects the GOPROXY to use. You can set it to "cn" to use GOPROXY="https://goproxy.cn". -# Default value is "us", which means using GOPROXY="https://goproxy.io". -# # KIND_KUBECONFIG # KIND_KUBECONFIG represents the path to store the kubeconfig file of the cluster # which is created by this shell. The default value is "$HOME/.kube/config". @@ -41,7 +37,11 @@ set -x set -e set -u -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)" +YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd -P)" + +source "${YURT_ROOT}/hack/lib/init.sh" +source "${YURT_ROOT}/hack/lib/build.sh" +YURT_VERSION=${YURT_VERSION:-${GIT_VERSION}} readonly REQUIRED_CMD=( go @@ -50,13 +50,12 @@ readonly REQUIRED_CMD=( kind ) -readonly BUILD_TARGETS=( - yurthub - yurt-controller-manager - yurtctl - yurt-tunnel-server - yurt-tunnel-agent - yurt-node-servant +readonly REQUIRED_IMAGES=( + openyurt/node-servant + openyurt/yurt-tunnel-agent + openyurt/yurt-tunnel-server + openyurt/yurt-controller-manager + openyurt/yurthub ) readonly LOCAL_ARCH=$(go env GOHOSTARCH) @@ -65,6 +64,10 @@ readonly CLUSTER_NAME="openyurt-e2e-test" readonly KUBERNETESVERSION=${KUBERNETESVERSION:-"v1.21"} readonly NODES_NUM=${NODES_NUM:-2} readonly KIND_KUBECONFIG=${KIND_KUBECONFIG:-${HOME}/.kube/config} +ENABLE_DUMMY_IF=true +if [[ "${LOCAL_OS}" == darwin ]]; then + ENABLE_DUMMY_IF=false +fi function install_kind { echo "Begin to install kind" @@ -99,29 +102,30 @@ function preflight { fi fi done -} - -function build_target_binaries_and_images { - echo "Begin to build binaries and images" - export WHAT=${BUILD_TARGETS[@]} - export ARCH=${LOCAL_ARCH} + for image in "${REQUIRED_IMAGES[@]}"; do + if [[ "$(docker image inspect --format='ignore me' ${image}:${YURT_VERSION})" != "ignore me" ]]; then + echo "image ${image}:${YURT_VERSION} is not exist locally" + exit -1 + fi + done +} - source ${YURT_ROOT}/hack/make-rules/release-images.sh +function build_yurtctl_binary { + echo "Begin to build yurtctl binary" + GOOS=${LOCAL_OS} GOARCH=${LOCAL_ARCH} build_binaries cmd/yurtctl } function local_up_openyurt { - echo "Begin to setup OpenYurt cluster" - openyurt_version=$(get_version ${LOCAL_ARCH}) + echo "Begin to setup OpenYurt cluster(version=${YURT_VERSION})" ${YURT_LOCAL_BIN_DIR}/${LOCAL_OS}/${LOCAL_ARCH}/yurtctl test init \ --kubernetes-version=${KUBERNETESVERSION} --kube-config=${KIND_KUBECONFIG} \ - --cluster-name=${CLUSTER_NAME} --openyurt-version=${openyurt_version} --use-local-images --ignore-error \ - --node-num=${NODES_NUM} + --cluster-name=${CLUSTER_NAME} --openyurt-version=${YURT_VERSION} --use-local-images --ignore-error \ + --node-num=${NODES_NUM} --enable-dummy-if=${ENABLE_DUMMY_IF} } function cleanup { rm -rf ${YURT_ROOT}/_output - rm -rf ${YURT_ROOT}/dockerbuild kind delete clusters ${CLUSTER_NAME} } @@ -136,5 +140,5 @@ trap cleanup_on_err EXIT cleanup preflight -build_target_binaries_and_images +build_yurtctl_binary local_up_openyurt \ No newline at end of file diff --git a/hack/make-rules/push-images.sh b/hack/make-rules/push-images.sh deleted file mode 100755 index 3d3af3b601c..00000000000 --- a/hack/make-rules/push-images.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -x - -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd -P)" -source "${YURT_ROOT}/hack/lib/init.sh" - -push_images diff --git a/hack/make-rules/push-manifest.sh b/hack/make-rules/push-manifest.sh deleted file mode 100644 index 65798f34dfd..00000000000 --- a/hack/make-rules/push-manifest.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -x - -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" -source "${YURT_ROOT}/hack/lib/init.sh" -export DOCKER_CLI_EXPERIMENTAL=enabled - -push_manifest \ No newline at end of file diff --git a/hack/make-rules/release-images.sh b/hack/make-rules/release-images.sh deleted file mode 100755 index f64bba8b516..00000000000 --- a/hack/make-rules/release-images.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -x - -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" -source "${YURT_ROOT}/hack/lib/init.sh" - -build_images diff --git a/hack/make-rules/release-manifest.sh b/hack/make-rules/release-manifest.sh deleted file mode 100644 index feff1a5d4b3..00000000000 --- a/hack/make-rules/release-manifest.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The OpenYurt Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -x - -YURT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" -source "${YURT_ROOT}/hack/lib/init.sh" -export DOCKER_CLI_EXPERIMENTAL=enabled - -build_docker_manifest \ No newline at end of file diff --git a/hack/run-e2e-tests.sh b/hack/make-rules/run-e2e-tests.sh similarity index 77% rename from hack/run-e2e-tests.sh rename to hack/make-rules/run-e2e-tests.sh index bd5b5f577b3..11563e4f760 100755 --- a/hack/run-e2e-tests.sh +++ b/hack/make-rules/run-e2e-tests.sh @@ -18,9 +18,13 @@ set -x set -e set -u -YURT_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd -P) -source ${YURT_ROOT}/hack/make-rules/build-e2e.sh +YURT_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P) +source "${YURT_ROOT}/hack/lib/init.sh" +source "${YURT_ROOT}/hack/lib/build.sh" +readonly LOCAL_ARCH=$(go env GOHOSTARCH) +readonly LOCAL_OS=$(go env GOHOSTOS) +readonly YURT_E2E_TARGETS="test/e2e/yurt-e2e-test" KUBECONFIG=${KUBECONFIG:-${HOME}/.kube/config} # run e2e tests @@ -36,4 +40,5 @@ function run_e2e_tests { ${target_bin_dir}/${e2e_test_file_name} -kubeconfig ${KUBECONFIG} } +GOOS=${LOCAL_OS} GOARCH=${LOCAL_ARCH} build_e2e run_e2e_tests \ No newline at end of file diff --git a/pkg/node-servant/components/yurthub.go b/pkg/node-servant/components/yurthub.go index bd04f66b9e6..98544c9a523 100644 --- a/pkg/node-servant/components/yurthub.go +++ b/pkg/node-servant/components/yurthub.go @@ -23,6 +23,7 @@ import ( "net/url" "os" "path/filepath" + "strconv" "strings" "time" @@ -47,17 +48,21 @@ type yurtHubOperator struct { joinToken string workingMode util.WorkingMode yurthubHealthCheckTimeout time.Duration + enableDummyIf bool + enableNodePool bool } // NewYurthubOperator new yurtHubOperator struct func NewYurthubOperator(apiServerAddr string, yurthubImage string, joinToken string, - workingMode util.WorkingMode, yurthubHealthCheckTimeout time.Duration) *yurtHubOperator { + workingMode util.WorkingMode, yurthubHealthCheckTimeout time.Duration, enableDummyIf, enableNodePool bool) *yurtHubOperator { return &yurtHubOperator{ apiServerAddr: apiServerAddr, yurthubImage: yurthubImage, joinToken: joinToken, workingMode: workingMode, yurthubHealthCheckTimeout: yurthubHealthCheckTimeout, + enableDummyIf: enableDummyIf, + enableNodePool: enableNodePool, } } @@ -66,7 +71,6 @@ func (op *yurtHubOperator) Install() error { // 1. put yurt-hub yaml into /etc/kubernetes/manifests klog.Infof("setting up yurthub on node") - // 1-1. replace variables in yaml file klog.Infof("setting up yurthub apiServer addr") yurthubTemplate, err := templates.SubsituteTemplate(enutil.YurthubTemplate, map[string]string{ @@ -74,6 +78,8 @@ func (op *yurtHubOperator) Install() error { "image": op.yurthubImage, "joinToken": op.joinToken, "workingMode": string(op.workingMode), + "enableDummyIf": strconv.FormatBool(op.enableDummyIf), + "enableNodePool": strconv.FormatBool(op.enableNodePool), }) if err != nil { return err diff --git a/pkg/node-servant/config/control-plane.go b/pkg/node-servant/config/control-plane.go new file mode 100644 index 00000000000..d0bfcab2f75 --- /dev/null +++ b/pkg/node-servant/config/control-plane.go @@ -0,0 +1,143 @@ +/* +Copyright 2022 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "fmt" + "path/filepath" + "strings" + + v1 "k8s.io/api/core/v1" + + fileutil "github.com/openyurtio/openyurt/pkg/util/file" +) + +// ControlPlaneConfig has the information that required by node-servant config control-plane operation +type ControlPlaneConfig struct { + RunMode string + KASStaticPodPath string + KCMStaticPodPath string +} + +type Runner interface { + Do() error +} + +func NewControlPlaneRunner(o *ControlPlaneOptions) (Runner, error) { + switch o.RunMode { + case "pod": + return newStaticPodRunner(o.PodManifestsPath) + default: + return nil, fmt.Errorf("%s mode is not supported, only static pod mode is implemented", o.RunMode) + } +} + +type staticPodRunner struct { + kasStaticPodPath string + kcmStaticPodPath string +} + +func newStaticPodRunner(podManifestsPath string) (Runner, error) { + kasStaticPodPath := filepath.Join(podManifestsPath, "kube-apiserver.yaml") + kcmStaticPodPath := filepath.Join(podManifestsPath, "kube-controller-manager.yaml") + if exist, _ := fileutil.FileExists(kasStaticPodPath); !exist { + return nil, fmt.Errorf("%s file is not exist", kasStaticPodPath) + } + + if exist, _ := fileutil.FileExists(kcmStaticPodPath); !exist { + return nil, fmt.Errorf("%s file is not exist", kcmStaticPodPath) + } + + return &staticPodRunner{ + kasStaticPodPath: kasStaticPodPath, + kcmStaticPodPath: kcmStaticPodPath, + }, nil +} + +func (spr *staticPodRunner) Do() error { + var kasPodUpdated bool + var kcmPodUpdated bool + // read kube-apiserver static pod + kasObj, err := fileutil.ReadObjectFromYamlFile(spr.kasStaticPodPath) + if err != nil { + return err + } + kasPod, ok := kasObj.(*v1.Pod) + if !ok { + return fmt.Errorf("manifest file(%s) is not a static pod", spr.kasStaticPodPath) + } + + // remove --kubelet-preferred-address-types parameter in order to make sure kube-apiserver + // to use hostname to access nodes on edge node + for i := range kasPod.Spec.Containers { + for j := range kasPod.Spec.Containers[i].Command { + if strings.Contains(kasPod.Spec.Containers[i].Command[j], "kubelet-preferred-address-types=") { + // remove --kubelet-preferred-address-types parameter setting + kasPod.Spec.Containers[i].Command = append(kasPod.Spec.Containers[i].Command[:j], kasPod.Spec.Containers[i].Command[j+1:]...) + kasPodUpdated = true + break + } + } + } + // set dnsPolicy to ClusterFirstWithHostNet in order to make sure kube-apiserver + // will use coredns to resolve hostname. 
by the way, hostname of edge nodes will be resolved + // to the service(x-tunnel-server-internal-svc) clusterIP of yurt-tunnel-server + if kasPod.Spec.DNSPolicy != v1.DNSClusterFirstWithHostNet { + kasPod.Spec.DNSPolicy = v1.DNSClusterFirstWithHostNet + kasPodUpdated = true + } + + // read kube-controller-manager static pod + kcmObj, err := fileutil.ReadObjectFromYamlFile(spr.kcmStaticPodPath) + if err != nil { + return err + } + kcmPod, ok := kcmObj.(*v1.Pod) + if !ok { + return fmt.Errorf("manifest file(%s) is not a static pod", spr.kcmStaticPodPath) + } + + // disable NodeLifeCycle controller + for i := range kcmPod.Spec.Containers { + for j := range kcmPod.Spec.Containers[i].Command { + if strings.Contains(kcmPod.Spec.Containers[i].Command[j], "--controllers=") { + if !strings.Contains(kcmPod.Spec.Containers[i].Command[j], "-nodelifecycle,") { + // insert -nodelifecycle, after = + insertPoint := strings.Index(kcmPod.Spec.Containers[i].Command[j], "=") + 1 + kcmPod.Spec.Containers[i].Command[j] = kcmPod.Spec.Containers[i].Command[j][:insertPoint] + "-nodelifecycle," + kcmPod.Spec.Containers[i].Command[j][insertPoint:] + kcmPodUpdated = true + break + } + } + } + } + + // update static pod files + if kasPodUpdated { + if err := fileutil.WriteObjectToYamlFile(kasPod, spr.kasStaticPodPath); err != nil { + return err + } + } + + if kcmPodUpdated { + if err := fileutil.WriteObjectToYamlFile(kcmPod, spr.kcmStaticPodPath); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/node-servant/config/options.go b/pkg/node-servant/config/options.go new file mode 100644 index 00000000000..b6ea7544308 --- /dev/null +++ b/pkg/node-servant/config/options.go @@ -0,0 +1,68 @@ +/* +Copyright 2022 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "os" + + "github.com/spf13/pflag" +) + +const ( + RunAsStaticPod = "pod" + RunAsControllerPod = "controller" +) + +// ControlPlaneOptions has the information that required by node-servant config control-plane +type ControlPlaneOptions struct { + RunMode string + PodManifestsPath string + Version bool +} + +// NewControlPlaneOptions creates a new Options +func NewControlPlaneOptions() *ControlPlaneOptions { + return &ControlPlaneOptions{ + RunMode: RunAsStaticPod, + PodManifestsPath: "/etc/kubernetes/manifests", + } +} + +// Validate validates Options +func (o *ControlPlaneOptions) Validate() error { + switch o.RunMode { + case RunAsStaticPod, RunAsControllerPod: + default: + return fmt.Errorf("run mode(%s) is not supported, only pod and controller are supported", o.RunMode) + } + + if info, err := os.Stat(o.PodManifestsPath); err != nil { + return err + } else if !info.IsDir() { + return fmt.Errorf("pod mainifests path(%s) should be a directory", o.PodManifestsPath) + } + + return nil +} + +// AddFlags sets flags. 
+func (o *ControlPlaneOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.RunMode, "run-mode", o.RunMode, "The run mode of control-plane components, only pod and controller modes are supported") + fs.StringVar(&o.PodManifestsPath, "pod-manifests-path", o.PodManifestsPath, "The path of pod manifests on the worker node.") + fs.BoolVar(&o.Version, "version", o.Version, "print the version information.") +} diff --git a/pkg/node-servant/constant.go b/pkg/node-servant/constant.go index e594906e767..5eb5c78eb84 100644 --- a/pkg/node-servant/constant.go +++ b/pkg/node-servant/constant.go @@ -26,6 +26,9 @@ const ( //ConvertPreflightJobNameBase is the prefix of the preflight-convert ServantJob name ConvertPreflightJobNameBase = "node-servant-preflight-convert" + // ConfigControlPlaneJobNameBase is the prefix of the config control-plane ServantJob name + ConfigControlPlaneJobNameBase = "config-control-plane" + // ConvertServantJobTemplate defines the yurtctl convert servant job in yaml format ConvertServantJobTemplate = ` apiVersion: batch/v1 @@ -53,7 +56,7 @@ spec: - /bin/sh - -c args: - - "/usr/local/bin/entry.sh convert --working-mode {{.working_mode}} --yurthub-image {{.yurthub_image}} {{if .yurthub_healthcheck_timeout}}--yurthub-healthcheck-timeout {{.yurthub_healthcheck_timeout}} {{end}}--join-token {{.joinToken}}" + - "/usr/local/bin/entry.sh convert --working-mode={{.working_mode}} --yurthub-image={{.yurthub_image}} {{if .yurthub_healthcheck_timeout}}--yurthub-healthcheck-timeout={{.yurthub_healthcheck_timeout}} {{end}}--join-token={{.joinToken}} {{if .enable_dummy_if}}--enable-dummy-if={{.enable_dummy_if}}{{end}} {{if .enable_node_pool}}--enable-node-pool={{.enable_node_pool}}{{end}}" securityContext: privileged: true volumeMounts: @@ -155,4 +158,39 @@ spec: value: {{.kubeadm_conf_path}} {{end}} ` + + // ConfigControlPlaneJobTemplate defines the node-servant config control-plane for configuring kube-apiserver and kube-controller-manager + ConfigControlPlaneJobTemplate = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: {{.jobName}} + namespace: kube-system +spec: + template: + spec: + hostPID: true + hostNetwork: true + restartPolicy: OnFailure + nodeName: {{.nodeName}} + volumes: + - name: host-root + hostPath: + path: / + type: Directory + containers: + - name: node-servant + image: {{.node_servant_image}} + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + args: + - "/usr/local/bin/entry.sh config control-plane" + securityContext: + privileged: true + volumeMounts: + - mountPath: /openyurt + name: host-root +` ) diff --git a/pkg/node-servant/convert/convert.go b/pkg/node-servant/convert/convert.go index a588252bf2e..78c0fad8809 100644 --- a/pkg/node-servant/convert/convert.go +++ b/pkg/node-servant/convert/convert.go @@ -18,30 +18,49 @@ package convert import ( "fmt" + "strings" + "time" "github.com/openyurtio/openyurt/pkg/node-servant/components" "github.com/openyurtio/openyurt/pkg/yurthub/util" ) +// Config has the information that required by convert operation +type Config struct { + yurthubImage string + yurthubHealthCheckTimeout time.Duration + workingMode util.WorkingMode + joinToken string + kubeadmConfPaths []string + openyurtDir string + enableDummyIf bool + enableNodePool bool +} + // NodeConverter do the convert job type nodeConverter struct { - Options + Config } // NewConverterWithOptions create nodeConverter func NewConverterWithOptions(o *Options) *nodeConverter { return &nodeConverter{ - *o, + Config: Config{ + yurthubImage: o.yurthubImage, + 
yurthubHealthCheckTimeout: o.yurthubHealthCheckTimeout, + workingMode: util.WorkingMode(o.workingMode), + joinToken: o.joinToken, + kubeadmConfPaths: strings.Split(o.kubeadmConfPaths, ","), + openyurtDir: o.openyurtDir, + enableDummyIf: o.enableDummyIf, + enableNodePool: o.enableNodePool, + }, } } -// Do, do the convert job. +// Do is used for the convert job. // shall be implemented as idempotent, can execute multiple times with no side-affect. func (n *nodeConverter) Do() error { - if err := n.validateOptions(); err != nil { - return err - } - if err := n.installYurtHub(); err != nil { return err } @@ -52,14 +71,6 @@ func (n *nodeConverter) Do() error { return nil } -func (n *nodeConverter) validateOptions() error { - if !util.IsSupportedWorkingMode(n.workingMode) { - return fmt.Errorf("workingMode must be pointed out as cloud or edge. got %s", n.workingMode) - } - - return nil -} - func (n *nodeConverter) installYurtHub() error { apiServerAddress, err := components.GetApiServerAddress(n.kubeadmConfPaths) if err != nil { @@ -69,7 +80,7 @@ func (n *nodeConverter) installYurtHub() error { return fmt.Errorf("get apiServerAddress empty") } op := components.NewYurthubOperator(apiServerAddress, n.yurthubImage, n.joinToken, - n.workingMode, n.yurthubHealthCheckTimeout) + n.workingMode, n.yurthubHealthCheckTimeout, n.enableDummyIf, n.enableNodePool) return op.Install() } diff --git a/pkg/node-servant/convert/options.go b/pkg/node-servant/convert/options.go index c2dd1eec815..69167b010cc 100644 --- a/pkg/node-servant/convert/options.go +++ b/pkg/node-servant/convert/options.go @@ -18,7 +18,6 @@ package convert import ( "fmt" - "os" "strings" "time" @@ -29,71 +28,58 @@ import ( hubutil "github.com/openyurtio/openyurt/pkg/yurthub/util" ) +const ( + // defaultYurthubHealthCheckTimeout defines the default timeout for yurthub health check phase + defaultYurthubHealthCheckTimeout = 2 * time.Minute +) + // Options has the information that required by convert operation type Options struct { yurthubImage string yurthubHealthCheckTimeout time.Duration - workingMode hubutil.WorkingMode - - joinToken string - kubeadmConfPaths []string - openyurtDir string + workingMode string + joinToken string + kubeadmConfPaths string + openyurtDir string + enableDummyIf bool + enableNodePool bool + Version bool } // NewConvertOptions creates a new Options func NewConvertOptions() *Options { return &Options{ - kubeadmConfPaths: components.GetDefaultKubeadmConfPath(), + yurthubImage: "openyurt/yurthub:latest", + yurthubHealthCheckTimeout: defaultYurthubHealthCheckTimeout, + workingMode: string(hubutil.WorkingModeEdge), + kubeadmConfPaths: strings.Join(components.GetDefaultKubeadmConfPath(), ","), + openyurtDir: enutil.OpenyurtDir, + enableDummyIf: true, + enableNodePool: true, } } -// Complete completes all the required options. 
-func (o *Options) Complete(flags *pflag.FlagSet) error { - yurthubImage, err := flags.GetString("yurthub-image") - if err != nil { - return err - } - o.yurthubImage = yurthubImage - - yurthubHealthCheckTimeout, err := flags.GetDuration("yurthub-healthcheck-timeout") - if err != nil { - return err +// Validate validates Options +func (o *Options) Validate() error { + if len(o.joinToken) == 0 { + return fmt.Errorf("join token(bootstrap token) is empty") } - o.yurthubHealthCheckTimeout = yurthubHealthCheckTimeout - kubeadmConfPaths, err := flags.GetString("kubeadm-conf-path") - if err != nil { - return err - } - if kubeadmConfPaths != "" { - o.kubeadmConfPaths = strings.Split(kubeadmConfPaths, ",") - } - - joinToken, err := flags.GetString("join-token") - if err != nil { - return err - } - if joinToken == "" { - return fmt.Errorf("get joinToken empty") + if !hubutil.IsSupportedWorkingMode(hubutil.WorkingMode(o.workingMode)) { + return fmt.Errorf("workingMode must be pointed out as cloud or edge. got %s", o.workingMode) } - o.joinToken = joinToken - - openyurtDir := os.Getenv("OPENYURT_DIR") - if openyurtDir == "" { - openyurtDir = enutil.OpenyurtDir - } - o.openyurtDir = openyurtDir - - workingMode, err := flags.GetString("working-mode") - if err != nil { - return err - } - - wm := hubutil.WorkingMode(workingMode) - if !hubutil.IsSupportedWorkingMode(wm) { - return fmt.Errorf("invalid working mode: %s", workingMode) - } - o.workingMode = wm return nil } + +// AddFlags sets flags. +func (o *Options) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.yurthubImage, "yurthub-image", o.yurthubImage, "The yurthub image.") + fs.DurationVar(&o.yurthubHealthCheckTimeout, "yurthub-healthcheck-timeout", o.yurthubHealthCheckTimeout, "The timeout for yurthub health check.") + fs.StringVarP(&o.kubeadmConfPaths, "kubeadm-conf-path", "k", o.kubeadmConfPaths, "The path to kubelet service conf that is used by kubelet component to join the cluster on the work node. 
Multiple values are supported and will be searched in order until the file is found (e.g. -k kbcfg1,kbcfg2).")
+	fs.StringVar(&o.joinToken, "join-token", o.joinToken, "The token used by yurthub for joining the cluster.")
+	fs.StringVar(&o.workingMode, "working-mode", o.workingMode, "The node type cloud/edge, which affects the yurthub working mode.")
+	fs.BoolVar(&o.enableDummyIf, "enable-dummy-if", o.enableDummyIf, "Enable dummy interface for yurthub or not.")
+	fs.BoolVar(&o.enableNodePool, "enable-node-pool", o.enableNodePool, "Enable list/watch nodepools for yurthub or not.")
+	fs.BoolVar(&o.Version, "version", o.Version, "print the version information.")
+}
diff --git a/pkg/node-servant/job.go b/pkg/node-servant/job.go
index 4c199e343d2..40c187a860c 100644
--- a/pkg/node-servant/job.go
+++ b/pkg/node-servant/job.go
@@ -29,7 +29,11 @@ import (
 
 // RenderNodeServantJob return k8s job
 // to start k8s job to run convert/revert on specific node
-func RenderNodeServantJob(action string, tmplCtx map[string]string, nodeName string) (*batchv1.Job, error) {
+func RenderNodeServantJob(action string, renderCtx map[string]string, nodeName string) (*batchv1.Job, error) {
+	tmplCtx := make(map[string]string)
+	for k, v := range renderCtx {
+		tmplCtx[k] = v
+	}
 	if err := validate(action, tmplCtx, nodeName); err != nil {
 		return nil, err
 	}
@@ -45,6 +49,9 @@ func RenderNodeServantJob(action string, tmplCtx map[string]string, nodeName str
 	case "preflight-convert":
 		servantJobTemplate = ConvertPreflightJobTemplate
 		jobBaseName = ConvertPreflightJobNameBase
+	case "config-control-plane":
+		servantJobTemplate = ConfigControlPlaneJobTemplate
+		jobBaseName = ConfigControlPlaneJobNameBase
 	}
 
 	tmplCtx["jobName"] = jobBaseName + "-" + nodeName
@@ -88,9 +95,10 @@ func validate(action string, tmplCtx map[string]string, nodeName string) error {
 	case "revert":
 		keysMustHave := []string{"node_servant_image"}
 		return checkKeys(keysMustHave, tmplCtx)
-	case "preflight-convert":
+	case "preflight-convert", "config-control-plane":
 		keysMustHave := []string{"node_servant_image"}
 		return checkKeys(keysMustHave, tmplCtx)
+
 	default:
 		return fmt.Errorf("action invalied: %s ", action)
 	}
diff --git a/pkg/node-servant/revert/revert.go b/pkg/node-servant/revert/revert.go
index afb08539399..5d5edf21397 100644
--- a/pkg/node-servant/revert/revert.go
+++ b/pkg/node-servant/revert/revert.go
@@ -35,7 +35,7 @@ func NewReverterWithOptions(o *Options) *nodeReverter {
 	}
 }
 
-// Do, do the revert job
+// Do is used for the revert job
 // shall be implemented as idempotent, can execute multiple times with no side-affect.
func (n *nodeReverter) Do() error { @@ -62,7 +62,7 @@ func (n *nodeReverter) revertKubelet() error { func (n *nodeReverter) unInstallYurtHub() error { op := components.NewYurthubOperator("", "", "", - util.WorkingModeCloud, time.Duration(1)) // params is not important here + util.WorkingModeCloud, time.Duration(1), true, true) // params is not important here return op.UnInstall() } diff --git a/pkg/preflight/checks.go b/pkg/preflight/checks.go index ab84f05f72e..235a12dead3 100644 --- a/pkg/preflight/checks.go +++ b/pkg/preflight/checks.go @@ -180,7 +180,7 @@ func (nc NodeServantJobCheck) Check() (warnings []error, errorList []error) { go func() { defer wg.Done() if err := kubeutil.RunJobAndCleanup(nc.cliSet, &entity, - nc.waitServantJobTimeout, nc.checkServantJobPeriod); err != nil { + nc.waitServantJobTimeout, nc.checkServantJobPeriod, false); err != nil { msg := fmt.Errorf("fail to run servant job(%s): %w\n", entity.GetName(), err) res <- msg } else { diff --git a/pkg/util/file/file.go b/pkg/util/file/file.go new file mode 100644 index 00000000000..1b7e372ed03 --- /dev/null +++ b/pkg/util/file/file.go @@ -0,0 +1,103 @@ +/* +Copyright 2022 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package file + +import ( + "fmt" + "io" + "os" + "path/filepath" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + clientsetscheme "k8s.io/client-go/kubernetes/scheme" +) + +// FileExists checks if specified file exists. 
+func FileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +func ReadObjectFromYamlFile(path string) (runtime.Object, error) { + buf, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read object from yaml file(%s) with error: %v", path, err) + } + + const mediaType = runtime.ContentTypeYAML + info, ok := runtime.SerializerInfoForMediaType(clientsetscheme.Codecs.SupportedMediaTypes(), mediaType) + if !ok { + return nil, fmt.Errorf("unsupported media type %q", mediaType) + } + + decoder := clientsetscheme.Codecs.DecoderToVersion(info.Serializer, v1.SchemeGroupVersion) + return runtime.Decode(decoder, buf) +} + +func WriteObjectToYamlFile(obj runtime.Object, path string) error { + const mediaType = runtime.ContentTypeYAML + info, ok := runtime.SerializerInfoForMediaType(clientsetscheme.Codecs.SupportedMediaTypes(), mediaType) + if !ok { + return fmt.Errorf("unsupported media type %q", mediaType) + } + + encoder := clientsetscheme.Codecs.EncoderForVersion(info.Serializer, v1.SchemeGroupVersion) + buf, err := runtime.Encode(encoder, obj) + if err != nil { + return fmt.Errorf("failed to encode object, %v", err) + } + + tmpPath := fmt.Sprintf("%s.tmp", path) + if err := os.WriteFile(tmpPath, buf, 0600); err != nil { + return fmt.Errorf("failed to write object into manifest file(%s), %v", tmpPath, err) + } + + if err := backupFile(path); err != nil { + os.Remove(tmpPath) + return err + } + + if err := os.Remove(path); err != nil { + os.Remove(tmpPath) + return err + } + + // rename tmp path file to path file + return os.Rename(tmpPath, path) +} + +func backupFile(path string) error { + src, err := os.Open(path) + if err != nil { + return err + } + + fileName := filepath.Base(path) + bakFile := filepath.Join("/tmp", fileName) + dst, err := os.Create(bakFile) + if err != nil { + return err + } + + _, err = io.Copy(dst, src) + return err +} diff --git a/pkg/yurtadm/constants/yurt-tunnel-agent-tmpl.go b/pkg/yurtadm/constants/yurt-tunnel-agent-tmpl.go index 2bc17da6a27..3b7572f8d44 100644 --- a/pkg/yurtadm/constants/yurt-tunnel-agent-tmpl.go +++ b/pkg/yurtadm/constants/yurt-tunnel-agent-tmpl.go @@ -41,6 +41,7 @@ spec: - command: - yurt-tunnel-agent args: + - --v=2 - --node-name=$(NODE_NAME) {{if .tunnelServerAddress }} - --tunnelserver-addr={{.tunnelServerAddress}} diff --git a/pkg/yurtadm/constants/yurt-tunnel-server-tmpl.go b/pkg/yurtadm/constants/yurt-tunnel-server-tmpl.go index 7f50789e943..ea874bd25ec 100644 --- a/pkg/yurtadm/constants/yurt-tunnel-server-tmpl.go +++ b/pkg/yurtadm/constants/yurt-tunnel-server-tmpl.go @@ -203,9 +203,11 @@ spec: command: - yurt-tunnel-server args: + - --v=2 - --bind-address=$(NODE_IP) - --insecure-bind-address=$(NODE_IP) - --server-count=1 + - --enable-iptables=false {{if .certIP }} - --cert-ips={{.certIP}} {{end}} diff --git a/pkg/yurtadm/util/edgenode/common.go b/pkg/yurtadm/util/edgenode/common.go index 7cdaa9a8c35..11e9bed82d2 100644 --- a/pkg/yurtadm/util/edgenode/common.go +++ b/pkg/yurtadm/util/edgenode/common.go @@ -94,6 +94,8 @@ spec: - --node-name=$(NODE_NAME) - --join-token={{.joinToken}} - --working-mode={{.workingMode}} + - --enable-dummy-if={{.enableDummyIf}} + - --enable-node-pool={{.enableNodePool}} {{if .organizations }} - --hub-cert-organizations={{.organizations}} {{end}} diff --git a/pkg/yurtadm/util/kubernetes/util.go b/pkg/yurtadm/util/kubernetes/util.go index 
b37f9f2ba73..5feae18ece7 100644 --- a/pkg/yurtadm/util/kubernetes/util.go +++ b/pkg/yurtadm/util/kubernetes/util.go @@ -47,6 +47,7 @@ import ( k8sruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + utilerrors "k8s.io/apimachinery/pkg/util/errors" yamlutil "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" @@ -436,7 +437,7 @@ func AddEdgeWorkerLabelAndAutonomyAnnotation(cliSet *kubernetes.Clientset, node } // RunJobAndCleanup runs the job, wait for it to be complete, and delete it -func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, period time.Duration) error { +func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, period time.Duration, waitForTimeout bool) error { job, err := cliSet.BatchV1().Jobs(job.GetNamespace()).Create(context.Background(), job, metav1.CreateOptions{}) if err != nil { return err @@ -447,25 +448,30 @@ func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, p case <-waitJobTimeout: return errors.New("wait for job to be complete timeout") case <-time.After(period): - job, err := cliSet.BatchV1().Jobs(job.GetNamespace()). + newJob, err := cliSet.BatchV1().Jobs(job.GetNamespace()). Get(context.Background(), job.GetName(), metav1.GetOptions{}) if err != nil { - klog.Errorf("fail to get job(%s) when waiting for it to be succeeded: %s", - job.GetName(), err) + if apierrors.IsNotFound(err) { + return err + } + + if waitForTimeout { + klog.Infof("continue to wait for job(%s) to complete until timeout, even if failed to get job, %v", job.GetName(), err) + continue + } return err } - if job.Status.Succeeded == *job.Spec.Completions { + + if newJob.Status.Succeeded == *newJob.Spec.Completions { if err := cliSet.BatchV1().Jobs(job.GetNamespace()). 
Delete(context.Background(), job.GetName(), metav1.DeleteOptions{ PropagationPolicy: &PropagationPolicy, }); err != nil { - klog.Errorf("fail to delete succeeded servant job(%s): %s", - job.GetName(), err) + klog.Errorf("fail to delete succeeded servant job(%s): %s", job.GetName(), err) return err } return nil } - continue } } } @@ -505,7 +511,8 @@ func RunServantJobs( cliSet *kubernetes.Clientset, waitServantJobTimeout time.Duration, getJob func(nodeName string) (*batchv1.Job, error), - nodeNames []string, ww io.Writer) error { + nodeNames []string, ww io.Writer, + waitForTimeout bool) error { var wg sync.WaitGroup jobByNodeName := make(map[string]*batchv1.Job) @@ -518,26 +525,34 @@ func RunServantJobs( } res := make(chan string, len(nodeNames)) + errCh := make(chan error, len(nodeNames)) for _, nodeName := range nodeNames { wg.Add(1) job := jobByNodeName[nodeName] go func() { defer wg.Done() - if err := RunJobAndCleanup(cliSet, job, - waitServantJobTimeout, CheckServantJobPeriod); err != nil { - msg := fmt.Sprintf("\t[ERROR] fail to run servant job(%s): %s\n", job.GetName(), err) - res <- msg + if err := RunJobAndCleanup(cliSet, job, waitServantJobTimeout, CheckServantJobPeriod, waitForTimeout); err != nil { + errCh <- fmt.Errorf("[ERROR] fail to run servant job(%s): %w", job.GetName(), err) } else { - msg := fmt.Sprintf("\t[INFO] servant job(%s) has succeeded\n", job.GetName()) - res <- msg + res <- fmt.Sprintf("\t[INFO] servant job(%s) has succeeded\n", job.GetName()) } }() } wg.Wait() close(res) + close(errCh) for m := range res { io.WriteString(ww, m) } + + errs := []error{} + for err := range errCh { + errs = append(errs, err) + } + if len(errs) != 0 { + return utilerrors.NewAggregate(errs) + } + return nil } diff --git a/pkg/yurtctl/cmd/yurttest/kindinit/converter.go b/pkg/yurtctl/cmd/yurttest/kindinit/converter.go index a087afe5615..256297bbe40 100644 --- a/pkg/yurtctl/cmd/yurttest/kindinit/converter.go +++ b/pkg/yurtctl/cmd/yurttest/kindinit/converter.go @@ -26,6 +26,7 @@ import ( batchv1 "k8s.io/api/batch/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes" bootstrapapi "k8s.io/cluster-bootstrap/token/api" "k8s.io/klog/v2" @@ -49,13 +50,13 @@ type ClusterConverter struct { EdgeNodes []string WaitServantJobTimeout time.Duration YurthubHealthCheckTimeout time.Duration - PodManifestPath string KubeConfigPath string YurtTunnelAgentImage string YurtTunnelServerImage string YurtControllerManagerImage string NodeServantImage string YurthubImage string + EnableDummyIf bool } func (c *ClusterConverter) Run() error { @@ -87,12 +88,6 @@ func (c *ClusterConverter) Run() error { } klog.Info("Running jobs for convert. 
Job running may take a long time, and job failure will not affect the execution of the next stage") - //disable native node-lifecycle-controller - klog.Info("Running disable-node-controller jobs to disable node-controller") - if err := c.disableNativeNodeLifecycleController(); err != nil { - klog.Errorf("failed to disable native node-lifecycle-controller, %v", err) - return err - } klog.Info("Running node-servant-convert jobs to deploy the yurt-hub and reset the kubelet service on edge and cloud nodes") if err := c.deployYurthub(); err != nil { @@ -132,24 +127,6 @@ func (c *ClusterConverter) deployYurtTunnel() error { return nil } -func (c *ClusterConverter) disableNativeNodeLifecycleController() error { - kcmNodeNames, err := kubeutil.GetKubeControllerManagerHANodes(c.ClientSet) - if err != nil { - return err - } - - if err = kubeutil.RunServantJobs(c.ClientSet, c.WaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) { - ctx := map[string]string{ - "node_servant_image": c.NodeServantImage, - "pod_manifest_path": c.PodManifestPath, - } - return kubeutil.RenderServantJob("disable", ctx, nodeName) - }, kcmNodeNames, os.Stderr); err != nil { - return err - } - return nil -} - func (c *ClusterConverter) deployYurthub() error { // deploy yurt-hub and reset the kubelet service on edge nodes. joinToken, err := prepareYurthubStart(c.ClientSet, c.KubeConfigPath) @@ -165,24 +142,34 @@ func (c *ClusterConverter) deployYurthub() error { // or "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf". "kubeadm_conf_path": "", "working_mode": string(util.WorkingModeEdge), + "enable_dummy_if": strconv.FormatBool(c.EnableDummyIf), } if c.YurthubHealthCheckTimeout != defaultYurthubHealthCheckTimeout { convertCtx["yurthub_healthcheck_timeout"] = c.YurthubHealthCheckTimeout.String() } + + npExist, err := nodePoolResourceExists(c.ClientSet) + if err != nil { + return err + } + convertCtx["enable_node_pool"] = strconv.FormatBool(npExist) + klog.Infof("convert context for edge nodes(%q): %#+v", c.EdgeNodes, convertCtx) + if len(c.EdgeNodes) != 0 { convertCtx["working_mode"] = string(util.WorkingModeEdge) if err = kubeutil.RunServantJobs(c.ClientSet, c.WaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) { return nodeservant.RenderNodeServantJob("convert", convertCtx, nodeName) - }, c.EdgeNodes, os.Stderr); err != nil { + }, c.EdgeNodes, os.Stderr, false); err != nil { return err } } // deploy yurt-hub and reset the kubelet service on cloud nodes convertCtx["working_mode"] = string(util.WorkingModeCloud) + klog.Infof("convert context for cloud nodes(%q): %#+v", c.CloudNodes, convertCtx) if err = kubeutil.RunServantJobs(c.ClientSet, c.WaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) { return nodeservant.RenderNodeServantJob("convert", convertCtx, nodeName) - }, c.CloudNodes, os.Stderr); err != nil { + }, c.CloudNodes, os.Stderr, false); err != nil { return err } @@ -230,3 +217,25 @@ func prepareClusterInfoConfigMap(client *kubernetes.Clientset, file string) erro return nil } + +func nodePoolResourceExists(client *kubernetes.Clientset) (bool, error) { + groupVersion := schema.GroupVersion{ + Group: "apps.openyurt.io", + Version: "v1alpha1", + } + apiResourceList, err := client.Discovery().ServerResourcesForGroupVersion(groupVersion.String()) + if err != nil && !apierrors.IsNotFound(err) { + klog.Errorf("failed to discover nodepool resource, %v", err) + return false, err + } else if apiResourceList == nil { + return false, nil + } + + for i := range 
apiResourceList.APIResources { + if apiResourceList.APIResources[i].Name == "nodepools" && apiResourceList.APIResources[i].Kind == "NodePool" { + return true, nil + } + } + + return false, nil +} diff --git a/pkg/yurtctl/cmd/yurttest/kindinit/init.go b/pkg/yurtctl/cmd/yurttest/kindinit/init.go index a71ac3d8ae3..0e78fca7d76 100644 --- a/pkg/yurtctl/cmd/yurttest/kindinit/init.go +++ b/pkg/yurtctl/cmd/yurttest/kindinit/init.go @@ -17,24 +17,30 @@ limitations under the License. package kindinit import ( + "context" "fmt" "os" "os/exec" "path/filepath" "strings" + "time" "github.com/spf13/cobra" "github.com/spf13/pflag" + batchv1 "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - enutil "github.com/openyurtio/openyurt/pkg/yurtadm/util/edgenode" + nodeservant "github.com/openyurtio/openyurt/pkg/node-servant" kubeutil "github.com/openyurtio/openyurt/pkg/yurtadm/util/kubernetes" strutil "github.com/openyurtio/openyurt/pkg/yurtadm/util/strings" tmplutil "github.com/openyurtio/openyurt/pkg/yurtadm/util/templates" "github.com/openyurtio/openyurt/pkg/yurtctl/constants" + "github.com/openyurtio/openyurt/pkg/yurthub/filter/servicetopology" ) var ( @@ -68,6 +74,13 @@ var ( nodeServantImageFormat = "openyurt/node-servant:%s" yurtTunnelServerImageFormat = "openyurt/yurt-tunnel-server:%s" yurtTunnelAgentImageFormat = "openyurt/yurt-tunnel-agent:%s" + + hostsSettingForCoreFile = []string{ + " hosts /etc/edge/tunnel-nodes {", + " reload 300ms", + " fallthrough", + " }", + } ) func NewKindInitCMD() *cobra.Command { @@ -103,6 +116,7 @@ type kindOptions struct { UseLocalImages bool KubeConfig string IgnoreError bool + EnableDummyIf bool } func newKindOptions() *kindOptions { @@ -114,6 +128,7 @@ func newKindOptions() *kindOptions { KubernetesVersion: "v1.21", UseLocalImages: false, IgnoreError: false, + EnableDummyIf: true, } } @@ -188,6 +203,7 @@ func (o *kindOptions) Config() *initializerConfig { NodeServantImage: fmt.Sprintf(nodeServantImageFormat, o.OpenYurtVersion), YurtTunnelServerImage: fmt.Sprintf(yurtTunnelServerImageFormat, o.OpenYurtVersion), YurtTunnelAgentImage: fmt.Sprintf(yurtTunnelAgentImageFormat, o.OpenYurtVersion), + EnableDummyIf: o.EnableDummyIf, } } @@ -210,7 +226,9 @@ func addFlags(flagset *pflag.FlagSet, o *kindOptions) { flagset.StringVar(&o.KubeConfig, "kube-config", o.KubeConfig, "Path where the kubeconfig file of new cluster will be stored. The default is ${HOME}/.kube/config.") flagset.BoolVar(&o.IgnoreError, "ignore-error", o.IgnoreError, - "Igore error when using openyurt version that is not officially released.") + "Ignore error when using openyurt version that is not officially released.") + flagset.BoolVar(&o.EnableDummyIf, "enable-dummy-if", o.EnableDummyIf, + "Enable dummy interface for yurthub component or not. 
It is recommended to set it to false on macOS.")
 }
 
 type initializerConfig struct {
@@ -228,11 +246,13 @@ type initializerConfig struct {
 	NodeServantImage           string
 	YurtTunnelServerImage      string
 	YurtTunnelAgentImage       string
+	EnableDummyIf              bool
 }
 
 type Initializer struct {
 	initializerConfig
-	operator *KindOperator
+	operator   *KindOperator
+	kubeClient *kubernetes.Clientset
 }
 
 func newKindInitializer(cfg *initializerConfig) *Initializer {
@@ -242,77 +262,97 @@ func newKindInitializer(cfg *initializerConfig) *Initializer {
 	}
 }
 
-func (i *Initializer) Run() error {
+func (ki *Initializer) Run() error {
 	klog.Info("Start to install kind")
-	if err := i.operator.KindInstall(); err != nil {
+	if err := ki.operator.KindInstall(); err != nil {
 		return err
 	}
 
 	klog.Info("Start to prepare config file for kind")
-	if err := i.prepareKindConfigFile(i.KindConfigPath); err != nil {
+	if err := ki.prepareKindConfigFile(ki.KindConfigPath); err != nil {
 		return err
 	}
 
 	klog.Info("Start to create cluster with kind")
-	if err := i.operator.KindCreateClusterWithConfig(i.KindConfigPath); err != nil {
+	if err := ki.operator.KindCreateClusterWithConfig(ki.KindConfigPath); err != nil {
+		return err
+	}
+
+	klog.Info("Start to prepare kube client")
+	kubeconfig, err := clientcmd.BuildConfigFromFlags("", ki.KubeConfig)
+	if err != nil {
+		return err
+	}
+	ki.kubeClient, err = kubernetes.NewForConfig(kubeconfig)
+	if err != nil {
+		return err
+	}
+
+	klog.Info("Start to prepare OpenYurt images for kind cluster")
+	if err := ki.prepareImages(); err != nil {
 		return err
 	}
 
-	klog.Infof("Start to prepare OpenYurt images for kind cluster")
-	if err := i.prepareImages(); err != nil {
+	klog.Info("Start to configure kube-apiserver and kube-controller-manager")
+	if err := ki.configureControlPlane(); err != nil {
 		return err
 	}
 
-	klog.Infof("Start to deploy OpenYurt components")
-	if err := i.deployOpenYurt(); err != nil {
+	klog.Info("Start to deploy OpenYurt components")
+	if err := ki.deployOpenYurt(); err != nil {
+		return err
+	}
+
+	klog.Infof("Start to configure coredns and kube-proxy to adapt OpenYurt")
+	if err := ki.configureAddons(); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func (i *Initializer) prepareImages() error {
-	if !i.UseLocalImage {
+func (ki *Initializer) prepareImages() error {
+	if !ki.UseLocalImage {
 		return nil
 	}
 	// load images of cloud components to cloud nodes
-	if err := i.loadImagesToKindNodes([]string{
-		i.YurtHubImage,
-		i.YurtControllerManagerImage,
-		i.NodeServantImage,
-		i.YurtTunnelServerImage,
-	}, i.CloudNodes); err != nil {
+	if err := ki.loadImagesToKindNodes([]string{
+		ki.YurtHubImage,
+		ki.YurtControllerManagerImage,
+		ki.NodeServantImage,
+		ki.YurtTunnelServerImage,
+	}, ki.CloudNodes); err != nil {
 		return err
 	}
 
 	// load images of edge components to edge nodes
-	if err := i.loadImagesToKindNodes([]string{
-		i.YurtHubImage,
-		i.NodeServantImage,
-		i.YurtTunnelAgentImage,
-	}, i.EdgeNodes); err != nil {
+	if err := ki.loadImagesToKindNodes([]string{
+		ki.YurtHubImage,
+		ki.NodeServantImage,
+		ki.YurtTunnelAgentImage,
+	}, ki.EdgeNodes); err != nil {
 		return err
 	}
 
 	return nil
 }
 
-func (i *Initializer) prepareKindConfigFile(kindConfigPath string) error {
+func (ki *Initializer) prepareKindConfigFile(kindConfigPath string) error {
 	kindConfigDir := filepath.Dir(kindConfigPath)
 	if err := os.MkdirAll(kindConfigDir, constants.DirMode); err != nil {
 		return err
 	}
 	kindConfigContent, err := tmplutil.SubsituteTemplate(constants.OpenYurtKindConfig, map[string]string{
-		"kind_node_image": i.NodeImage,
-		"cluster_name": 
i.ClusterName, + "kind_node_image": ki.NodeImage, + "cluster_name": ki.ClusterName, }) if err != nil { return err } // add additional worker entries into kind config file according to NodesNum - for num := 1; num < i.NodesNum; num++ { + for num := 1; num < ki.NodesNum; num++ { worker, err := tmplutil.SubsituteTemplate(constants.KindWorkerRole, map[string]string{ - "kind_node_image": i.NodeImage, + "kind_node_image": ki.NodeImage, }) if err != nil { return err @@ -327,29 +367,208 @@ func (i *Initializer) prepareKindConfigFile(kindConfigPath string) error { return nil } -func (i *Initializer) deployOpenYurt() error { - kubeconfig, err := clientcmd.BuildConfigFromFlags("", i.KubeConfig) +func (ki *Initializer) configureControlPlane() error { + convertCtx := map[string]string{ + "node_servant_image": ki.NodeServantImage, + } + + return kubeutil.RunServantJobs(ki.kubeClient, kubeutil.DefaultWaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) { + return nodeservant.RenderNodeServantJob("config-control-plane", convertCtx, nodeName) + }, ki.CloudNodes, os.Stderr, true) +} + +func (ki *Initializer) configureAddons() error { + if err := ki.configureCoreDnsAddon(); err != nil { + return err + } + + if err := ki.ConfigureKubeProxyAddon(); err != nil { + return err + } + + // re-construct kube-proxy pods + podList, err := ki.kubeClient.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } - client, err := kubernetes.NewForConfig(kubeconfig) + for i := range podList.Items { + switch { + case strings.HasPrefix(podList.Items[i].Name, "kube-proxy"): + // delete pod + propagation := metav1.DeletePropagationForeground + err = ki.kubeClient.CoreV1().Pods("kube-system").Delete(context.TODO(), podList.Items[i].Name, metav1.DeleteOptions{ + PropagationPolicy: &propagation, + }) + if err != nil { + klog.Errorf("failed to delete pod(%s), %v", podList.Items[i].Name, err) + } + default: + } + } + + // wait for coredns pods available + for { + select { + case <-time.After(10 * time.Second): + dnsDp, err := ki.kubeClient.AppsV1().Deployments("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get coredns deployment when waiting for available, %v", err) + } + + if dnsDp.Status.ObservedGeneration < dnsDp.Generation { + klog.Infof("waiting for coredns generation(%d) to be observed. now observed generation is %d", dnsDp.Generation, dnsDp.Status.ObservedGeneration) + continue + } + + if *dnsDp.Spec.Replicas != dnsDp.Status.AvailableReplicas { + klog.Infof("waiting for coredns replicas(%d) to be ready, now %d pods available", *dnsDp.Spec.Replicas, dnsDp.Status.AvailableReplicas) + continue + } + klog.Info("coredns deployment configuration is completed") + return nil + } + } +} + +func (ki *Initializer) configureCoreDnsAddon() error { + // config configmap kube-system/coredns in order to add hosts setting for resolving hostname to x-tunnel-server-internal-svc service + cm, err := ki.kubeClient.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{}) if err != nil { return err } + if cm != nil && !strings.Contains(cm.Data["Corefile"], "hosts /etc/edge/tunnel-nodes") { + lines := strings.Split(cm.Data["Corefile"], "\n") + for i := range lines { + if strings.Contains(lines[i], "kubernetes cluster.local") && strings.Contains(lines[i], "{") { + lines = append(lines[:i], append(hostsSettingForCoreFile, lines[i:]...)...) 
+ break + } + } + cm.Data["Corefile"] = strings.Join(lines, "\n") + + // update coredns configmap + _, err = ki.kubeClient.CoreV1().ConfigMaps("kube-system").Update(context.TODO(), cm, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to configure coredns configmap, %w", err) + } + } + // add annotation(openyurt.io/topologyKeys=kubernetes.io/hostname) for service kube-system/kube-dns in order to use the + // local coredns instance for resolving. + svc, err := ki.kubeClient.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{}) + if err != nil { + return err + } + if svc != nil && len(svc.Annotations[servicetopology.AnnotationServiceTopologyKey]) == 0 { + svc.Annotations[servicetopology.AnnotationServiceTopologyKey] = servicetopology.AnnotationServiceTopologyValueNode + if _, err := ki.kubeClient.CoreV1().Services("kube-system").Update(context.TODO(), svc, metav1.UpdateOptions{}); err != nil { + return err + } + } + + // kubectl patch deployment coredns -n kube-system -p '{"spec": {"template": {"spec": {"volumes": [{"configMap":{"name":"yurt-tunnel-nodes"},"name": "edge"}]}}}}' + // kubectl patch deployment coredns -n kube-system -p '{"spec": { "template": { "spec": { "containers": [{"name":"coredns","volumeMounts": [{"mountPath": "/etc/edge", "name": "edge", "readOnly": true }]}]}}}}' + dp, err := ki.kubeClient.AppsV1().Deployments("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{}) + if err != nil { + return err + } + + if dp != nil { + dp.Spec.Template.Spec.HostNetwork = true + hasEdgeVolume := false + for i := range dp.Spec.Template.Spec.Volumes { + if dp.Spec.Template.Spec.Volumes[i].Name == "edge" { + hasEdgeVolume = true + break + } + } + if !hasEdgeVolume { + dp.Spec.Template.Spec.Volumes = append(dp.Spec.Template.Spec.Volumes, v1.Volume{ + Name: "edge", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "yurt-tunnel-nodes", + }, + }, + }, + }) + } + hasEdgeVolumeMount := false + containerIndex := 0 + for i := range dp.Spec.Template.Spec.Containers { + if dp.Spec.Template.Spec.Containers[i].Name == "coredns" { + for j := range dp.Spec.Template.Spec.Containers[i].VolumeMounts { + if dp.Spec.Template.Spec.Containers[i].VolumeMounts[j].Name == "edge" { + hasEdgeVolumeMount = true + containerIndex = i + break + } + } + } + if hasEdgeVolumeMount { + break + } + } + if !hasEdgeVolumeMount { + dp.Spec.Template.Spec.Containers[containerIndex].VolumeMounts = append(dp.Spec.Template.Spec.Containers[containerIndex].VolumeMounts, + v1.VolumeMount{ + Name: "edge", + MountPath: "/etc/edge", + ReadOnly: true, + }) + } + + if !hasEdgeVolume || !hasEdgeVolumeMount { + _, err = ki.kubeClient.AppsV1().Deployments("kube-system").Update(context.TODO(), dp, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + } + + return nil +} + +func (ki *Initializer) ConfigureKubeProxyAddon() error { + // configure configmap kube-system/kube-proxy in order to make kube-proxy access kube-apiserver by going through yurthub + cm, err := ki.kubeClient.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "kube-proxy", metav1.GetOptions{}) + if err != nil { + return err + } + if cm != nil && strings.Contains(cm.Data["config.conf"], "kubeconfig") { + lines := strings.Split(cm.Data["config.conf"], "\n") + for i := range lines { + if strings.Contains(lines[i], "kubeconfig:") { + lines = append(lines[:i], lines[i+1:]...) 
+ break + } + } + cm.Data["config.conf"] = strings.Join(lines, "\n") + + // update kube-proxy configmap + _, err = ki.kubeClient.CoreV1().ConfigMaps("kube-system").Update(context.TODO(), cm, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to configure kube-proxy configmap, %w", err) + } + } + return nil +} + +func (ki *Initializer) deployOpenYurt() error { converter := &ClusterConverter{ - ClientSet: client, - CloudNodes: i.CloudNodes, - EdgeNodes: i.EdgeNodes, + ClientSet: ki.kubeClient, + CloudNodes: ki.CloudNodes, + EdgeNodes: ki.EdgeNodes, WaitServantJobTimeout: kubeutil.DefaultWaitServantJobTimeout, YurthubHealthCheckTimeout: defaultYurthubHealthCheckTimeout, - PodManifestPath: enutil.GetPodManifestPath(), - KubeConfigPath: i.KubeConfig, - YurtTunnelAgentImage: i.YurtTunnelAgentImage, - YurtTunnelServerImage: i.YurtTunnelServerImage, - YurtControllerManagerImage: i.YurtControllerManagerImage, - NodeServantImage: i.NodeServantImage, - YurthubImage: i.YurtHubImage, + KubeConfigPath: ki.KubeConfig, + YurtTunnelAgentImage: ki.YurtTunnelAgentImage, + YurtTunnelServerImage: ki.YurtTunnelServerImage, + YurtControllerManagerImage: ki.YurtControllerManagerImage, + NodeServantImage: ki.NodeServantImage, + YurthubImage: ki.YurtHubImage, + EnableDummyIf: ki.EnableDummyIf, } if err := converter.Run(); err != nil { klog.Errorf("errors occurred when deploying openyurt components") @@ -358,13 +577,13 @@ func (i *Initializer) deployOpenYurt() error { return nil } -func (i *Initializer) loadImagesToKindNodes(images, nodes []string) error { +func (ki *Initializer) loadImagesToKindNodes(images, nodes []string) error { for _, image := range images { if image == "" { // if image == "", it's the responsibility of kind to pull images from registry. continue } - if err := i.operator.KindLoadDockerImage(i.ClusterName, image, nodes); err != nil { + if err := ki.operator.KindLoadDockerImage(ki.ClusterName, image, nodes); err != nil { return err } } @@ -388,25 +607,22 @@ func checkIfKindAt(path string) (bool, string) { func findKindPath() (string, error) { var kindPath string - switch { - case true: - if exist, path := checkIfKindAt("kind"); exist { - kindPath = path - break - } - fallthrough - case true: + if exist, path := checkIfKindAt("kind"); exist { + kindPath = path + return kindPath, nil + } else { goBinPath, err := getGoBinPath() if err != nil { klog.Fatal("failed to get go bin path, %s", err) } + if exist, path := checkIfKindAt(goBinPath + "/kind"); exist { kindPath = path - break } - fallthrough - default: - return "", fmt.Errorf("cannot find valid kind cmd, try to install it") + } + + if len(kindPath) == 0 { + return kindPath, fmt.Errorf("cannot find valid kind cmd, try to install it") } if err := validateKindVersion(kindPath); err != nil {