From 771cc58b0fb116cfa6fed11994085cf37f4f526a Mon Sep 17 00:00:00 2001 From: Waleed Malik Date: Wed, 12 Apr 2023 16:51:17 +0500 Subject: [PATCH] chore: migrate from k8s.gcr.io to registry.k8s.io --- docs/user-manuals/network/edge-ingress.md | 16 ++++++++-------- docs/user-manuals/workload/yurt-app-daemon.md | 10 +++++----- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../developer-manuals/how-to-build-and-test.md | 18 +++++++++--------- .../developer-manuals/how-to-build-and-test.md | 18 +++++++++--------- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/workload/yurt-app-daemon.md | 11 +++++------ .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../developer-manuals/how-to-build-and-test.md | 18 +++++++++--------- .../developer-manuals/how-to-build-and-test.md | 18 +++++++++--------- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/workload/yurt-app-daemon.md | 11 +++++------ .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/workload/yurt-app-daemon.md | 10 +++++----- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/workload/yurt-app-daemon.md | 10 +++++----- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/workload/yurt-app-daemon.md | 10 +++++----- .../user-manuals/network/edge-ingress.md | 16 ++++++++-------- .../user-manuals/workload/yurt-app-daemon.md | 10 +++++----- 23 files changed, 167 insertions(+), 169 deletions(-) diff --git a/docs/user-manuals/network/edge-ingress.md b/docs/user-manuals/network/edge-ingress.md index 7bea6fda56..acf3b66490 100644 --- a/docs/user-manuals/network/edge-ingress.md +++ b/docs/user-manuals/network/edge-ingress.md @@ -20,7 +20,7 @@ YurtIngress operator is responsible for orchestrating multi ingress controllers Suppose you have created 4 NodePools in your OpenYurt cluster: pool01, pool02, pool03, pool04, and you want to enable edge ingress feature on pool01 and pool03, you can create the YurtIngress CR as below: -1). Create the YurtIngress CR yaml file: +1). Create the YurtIngress CR yaml file: 1.1). A simple CR definition with some default configurations: @@ -33,8 +33,8 @@ enable edge ingress feature on pool01 and pool03, you can create the YurtIngress - name: pool01 - name: pool03 -The default nginx ingress controller replicas per pool is 1. -The default nginx ingress controller image is controller:v0.48.1 from dockerhub. +The default nginx ingress controller replicas per pool is 1. +The default nginx ingress controller image is controller:v0.48.1 from dockerhub. The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 from dockerhub. 1.2). 
If users want to make personalized configurations about the default options, the YurtIngress CR can be defined as below: @@ -45,8 +45,8 @@ The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: @@ -67,7 +67,7 @@ c). In spec, the "pools" represents the pools list on which you want to enable i Currently it supports the pool name and the nginx ingress controller service externalIPs. -2). Apply the YurtIngress CR yaml file: +2). Apply the YurtIngress CR yaml file: Assume the file name is yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -172,7 +172,7 @@ Suppose your app workload is deployed to several NodePools and it exposes a glob If you want to access the service provided by pool01: -1). Create the ingress rule yaml file: +1). Create the ingress rule yaml file: apiVersion: extensions/v1beta1 kind: Ingress @@ -197,7 +197,7 @@ a). Ingress class decides which NodePool to provide the ingress capability, so y b). The ingress CR definition may be different for different K8S versions, so you need ensure the CR definition matches with your cluster K8S version. -2). Apply the ingress rule yaml file: +2). Apply the ingress rule yaml file: Assume the file name is ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/docs/user-manuals/workload/yurt-app-daemon.md b/docs/user-manuals/workload/yurt-app-daemon.md index fe8fc5036a..1cefb4ab1f 100644 --- a/docs/user-manuals/workload/yurt-app-daemon.md +++ b/docs/user-manuals/workload/yurt-app-daemon.md @@ -235,7 +235,7 @@ spec: - args: - -conf - /etc/coredns/Corefile - image: k8s.gcr.io/coredns:1.6.7 + image: registry.k8s.io/coredns:1.6.7 imagePullPolicy: IfNotPresent name: coredns resources: @@ -251,7 +251,7 @@ spec: - NET_BIND_SERVICE drop: - all - readOnlyRootFilesystem: true + readOnlyRootFilesystem: true livenessProbe: failureThreshold: 5 httpGet: @@ -261,7 +261,7 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 5 + timeoutSeconds: 5 volumeMounts: - mountPath: /etc/coredns name: config-volume @@ -302,7 +302,7 @@ spec: selector: k8s-app: kube-dns sessionAffinity: None - type: ClusterIP + type: ClusterIP --- apiVersion: v1 data: @@ -384,4 +384,4 @@ subjects: EOF -``` +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/user-manuals/network/edge-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/current/user-manuals/network/edge-ingress.md index 7ca8544351..0a2c882aaf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/user-manuals/network/edge-ingress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/user-manuals/network/edge-ingress.md @@ -34,9 +34,9 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I - name: pool01 - name: pool03 -默认为每个节点池创建的nginx ingress控制器副本数为1 -默认的ingress控制器docker image为:k8s.gcr.io/ingress-nginx/controller:v0.48.1 -默认的生成ingress控制器webhook证书的docker image为:k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.48.1 +默认为每个节点池创建的nginx ingress控制器副本数为1 +默认的ingress控制器docker image为:registry.k8s.io/ingress-nginx/controller:v0.48.1 +默认的生成ingress控制器webhook证书的docker 
image为:registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.48.1 1.2). 如果用户不想使用默认的配置,而是想对节点池做一些个性化配置,可以如下定义CR: @@ -46,15 +46,15 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: - xxx.xxx.xxx.xxx - name: pool03 -其中: +其中: `igress_controller_replicas_per_pool`/`ingress_controller_image`/`ingress_webhook_certgen_image`可供用户自定义相关默认配置, `ingress_ips` 代表如果用户想通过externalIPs的方式为某个特定的节点池对外暴露nginx ingress控制器服务的公网IP地址。 @@ -67,7 +67,7 @@ b). 在spec中,“ingress_controller_replicas_per_pool”表示部署在每个 c). 在spec中,“pools”表示要在其上开启ingress功能的节点池列表,目前支持节点池名及针对该节点池的ingress服务公网IP配置。 -2). 部署YurtIngress CR yaml文件: +2). 部署YurtIngress CR yaml文件: 假定CR文件名为yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -194,7 +194,7 @@ a). 由哪个节点池提供ingress功能是由ingress class决定的,因此 b). 不同K8S版本的ingress CR定义可能不同,您需要确保ingress CR的定义与集群K8S版本匹配。 -2). 部署ingress规则yaml文件: +2). 部署ingress规则yaml文件: 假定yaml文件名为ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md b/i18n/zh/docusaurus-plugin-content-docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md index d221f68f11..7d40797adc 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md @@ -9,7 +9,7 @@ OpenYurt主仓库([openyurtio/openyurt](https://github.com/openyurtio/openyurt 3. yurt-tunnel-server 4. yurt-tunnel-agent 5. yurtctl -6. yurt-node-servant +6. yurt-node-servant 本文主要介绍了在完成对上述组件的开发后,如何进行编译源码、打包镜像、运行组件和测试验证等工作。 @@ -57,14 +57,14 @@ GOOS=${target_os} GOARCH=${target_arch} CGO_ENABLED=0 make build WHAT=yurtctl 由于在Windows上通常没有make命令(如果你没有用Cygwin的话),需要自己执行`go build`,powershell中执行方法如下(以管理员权限运行): -1. 设置环境变量 +1. 设置环境变量 在运行前需要设置环境变量`target_os`和`target_arch`,设为需要的操作系统和架构。 ```powershell $Env:GOOS = $Env:target_os $Env:GOARCH = $Env:target_arch $Env:CGO_ENABLED = 0 -$Env:GOLDFLAGS = "-s -w +$Env:GOLDFLAGS = "-s -w -X github.com/openyurtio/openyurt/pkg/projectinfo.projectPrefix=yurt -X github.com/openyurtio/openyurt/pkg/projectinfo.labelPrefix=openyurt.io -X github.com/openyurtio/openyurt/pkg/projectinfo.gitVersion=$(git describe --abbrev=0) @@ -72,7 +72,7 @@ $Env:GOLDFLAGS = "-s -w -X github.com/openyurtio/openyurt/pkg/projectinfo.buildDate=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" ``` -2. 使用go build进行编译 +2. 
使用go build进行编译 运行go build命令进行编译,这里需要加上`-ldflags=$Env:GOLDFLAGS`选项,还可以通过-o来调整编译好的yurtctl的保存位置。 ```powershell @@ -81,7 +81,7 @@ go build -ldflags=$Env:GOLDFLAGS cmd/yurtctl/yurtctl.go ### 手动打包镜像 -本节描述各个组件的dockerfile,便于通过`docker build`命令来手动打包组件镜像,下面是yurtctl和yurt-node-servant的架构与基础镜像之间的关系表。 +本节描述各个组件的dockerfile,便于通过`docker build`命令来手动打包组件镜像,下面是yurtctl和yurt-node-servant的架构与基础镜像之间的关系表。 | 架构 | 基础镜像 | | ----- | ------------------ | @@ -112,7 +112,7 @@ ADD yurt-node-servant /usr/local/bin/node-servant 其他组件的基础镜像和上述两个不同。其中arch为架构名称,包括amd64,arm和arm64;component表示组件名称,包括yurthub,yurt-controller-manager,yurt-tunnel-server和yurt-tunnel-agent。 ```dockerfile -FROM k8s.gcr.io/debian-iptables-${arch}:v11.0.2 +FROM registry.k8s.io/debian-iptables-${arch}:v11.0.2 COPY ${component} /usr/local/bin/${component} ENTRYPOINT ["/usr/local/bin/${component}"] ``` @@ -183,11 +183,11 @@ $ ./_output/bin/linux/amd64/yurt-e2e-test --kubeconfig=$HOME/.kube/config --rep ## 常见问题 -1. 编译时出现"go: github.com...unknown revision xxx" +1. 编译时出现"go: github.com...unknown revision xxx" 通常是git的版本过低造成的,可以尝试升级git版本。 -2. 编译时出现"unsupported GOOS/GOARCH pair xxx/xxx" +2. 编译时出现"unsupported GOOS/GOARCH pair xxx/xxx" go不能支持所有的GOOS/GOARCH组合,如go1.17.3不支持windows/arm64。可以通过`go tool dist list`来查看支持的GOOS/GOARCH组合。 -3. 运行交叉编译的可执行二进制文件时出现"cannot execute binary file: Exec format error" +3. 运行交叉编译的可执行二进制文件时出现"cannot execute binary file: Exec format error" 通常是没有成功完成交叉编译,导致运行平台与当前平台不同,无法识别文件格式。在Windows上进行交叉编译尤其需要注意开启管理员权限。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md b/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md index d221f68f11..7d40797adc 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md @@ -9,7 +9,7 @@ OpenYurt主仓库([openyurtio/openyurt](https://github.com/openyurtio/openyurt 3. yurt-tunnel-server 4. yurt-tunnel-agent 5. yurtctl -6. yurt-node-servant +6. yurt-node-servant 本文主要介绍了在完成对上述组件的开发后,如何进行编译源码、打包镜像、运行组件和测试验证等工作。 @@ -57,14 +57,14 @@ GOOS=${target_os} GOARCH=${target_arch} CGO_ENABLED=0 make build WHAT=yurtctl 由于在Windows上通常没有make命令(如果你没有用Cygwin的话),需要自己执行`go build`,powershell中执行方法如下(以管理员权限运行): -1. 设置环境变量 +1. 设置环境变量 在运行前需要设置环境变量`target_os`和`target_arch`,设为需要的操作系统和架构。 ```powershell $Env:GOOS = $Env:target_os $Env:GOARCH = $Env:target_arch $Env:CGO_ENABLED = 0 -$Env:GOLDFLAGS = "-s -w +$Env:GOLDFLAGS = "-s -w -X github.com/openyurtio/openyurt/pkg/projectinfo.projectPrefix=yurt -X github.com/openyurtio/openyurt/pkg/projectinfo.labelPrefix=openyurt.io -X github.com/openyurtio/openyurt/pkg/projectinfo.gitVersion=$(git describe --abbrev=0) @@ -72,7 +72,7 @@ $Env:GOLDFLAGS = "-s -w -X github.com/openyurtio/openyurt/pkg/projectinfo.buildDate=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" ``` -2. 使用go build进行编译 +2. 
使用go build进行编译 运行go build命令进行编译,这里需要加上`-ldflags=$Env:GOLDFLAGS`选项,还可以通过-o来调整编译好的yurtctl的保存位置。 ```powershell @@ -81,7 +81,7 @@ go build -ldflags=$Env:GOLDFLAGS cmd/yurtctl/yurtctl.go ### 手动打包镜像 -本节描述各个组件的dockerfile,便于通过`docker build`命令来手动打包组件镜像,下面是yurtctl和yurt-node-servant的架构与基础镜像之间的关系表。 +本节描述各个组件的dockerfile,便于通过`docker build`命令来手动打包组件镜像,下面是yurtctl和yurt-node-servant的架构与基础镜像之间的关系表。 | 架构 | 基础镜像 | | ----- | ------------------ | @@ -112,7 +112,7 @@ ADD yurt-node-servant /usr/local/bin/node-servant 其他组件的基础镜像和上述两个不同。其中arch为架构名称,包括amd64,arm和arm64;component表示组件名称,包括yurthub,yurt-controller-manager,yurt-tunnel-server和yurt-tunnel-agent。 ```dockerfile -FROM k8s.gcr.io/debian-iptables-${arch}:v11.0.2 +FROM registry.k8s.io/debian-iptables-${arch}:v11.0.2 COPY ${component} /usr/local/bin/${component} ENTRYPOINT ["/usr/local/bin/${component}"] ``` @@ -183,11 +183,11 @@ $ ./_output/bin/linux/amd64/yurt-e2e-test --kubeconfig=$HOME/.kube/config --rep ## 常见问题 -1. 编译时出现"go: github.com...unknown revision xxx" +1. 编译时出现"go: github.com...unknown revision xxx" 通常是git的版本过低造成的,可以尝试升级git版本。 -2. 编译时出现"unsupported GOOS/GOARCH pair xxx/xxx" +2. 编译时出现"unsupported GOOS/GOARCH pair xxx/xxx" go不能支持所有的GOOS/GOARCH组合,如go1.17.3不支持windows/arm64。可以通过`go tool dist list`来查看支持的GOOS/GOARCH组合。 -3. 运行交叉编译的可执行二进制文件时出现"cannot execute binary file: Exec format error" +3. 运行交叉编译的可执行二进制文件时出现"cannot execute binary file: Exec format error" 通常是没有成功完成交叉编译,导致运行平台与当前平台不同,无法识别文件格式。在Windows上进行交叉编译尤其需要注意开启管理员权限。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/network/edge-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/network/edge-ingress.md index 7ca8544351..0a2c882aaf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/network/edge-ingress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/network/edge-ingress.md @@ -34,9 +34,9 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I - name: pool01 - name: pool03 -默认为每个节点池创建的nginx ingress控制器副本数为1 -默认的ingress控制器docker image为:k8s.gcr.io/ingress-nginx/controller:v0.48.1 -默认的生成ingress控制器webhook证书的docker image为:k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.48.1 +默认为每个节点池创建的nginx ingress控制器副本数为1 +默认的ingress控制器docker image为:registry.k8s.io/ingress-nginx/controller:v0.48.1 +默认的生成ingress控制器webhook证书的docker image为:registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.48.1 1.2). 如果用户不想使用默认的配置,而是想对节点池做一些个性化配置,可以如下定义CR: @@ -46,15 +46,15 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: - xxx.xxx.xxx.xxx - name: pool03 -其中: +其中: `igress_controller_replicas_per_pool`/`ingress_controller_image`/`ingress_webhook_certgen_image`可供用户自定义相关默认配置, `ingress_ips` 代表如果用户想通过externalIPs的方式为某个特定的节点池对外暴露nginx ingress控制器服务的公网IP地址。 @@ -67,7 +67,7 @@ b). 在spec中,“ingress_controller_replicas_per_pool”表示部署在每个 c). 在spec中,“pools”表示要在其上开启ingress功能的节点池列表,目前支持节点池名及针对该节点池的ingress服务公网IP配置。 -2). 部署YurtIngress CR yaml文件: +2). 部署YurtIngress CR yaml文件: 假定CR文件名为yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -194,7 +194,7 @@ a). 由哪个节点池提供ingress功能是由ingress class决定的,因此 b). 
不同K8S版本的ingress CR定义可能不同,您需要确保ingress CR的定义与集群K8S版本匹配。 -2). 部署ingress规则yaml文件: +2). 部署ingress规则yaml文件: 假定yaml文件名为ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md b/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md index 575584e2ac..8eec3a892b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md @@ -236,7 +236,7 @@ spec: - args: - -conf - /etc/coredns/Corefile - image: k8s.gcr.io/coredns:1.6.7 + image: registry.k8s.io/coredns:1.6.7 imagePullPolicy: IfNotPresent name: coredns resources: @@ -252,7 +252,7 @@ spec: - NET_BIND_SERVICE drop: - all - readOnlyRootFilesystem: true + readOnlyRootFilesystem: true livenessProbe: failureThreshold: 5 httpGet: @@ -262,7 +262,7 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 5 + timeoutSeconds: 5 volumeMounts: - mountPath: /etc/coredns name: config-volume @@ -303,7 +303,7 @@ spec: selector: k8s-app: kube-dns sessionAffinity: None - type: ClusterIP + type: ClusterIP --- apiVersion: v1 data: @@ -385,5 +385,4 @@ subjects: EOF -``` - +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v0.7.0/user-manuals/network/edge-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/version-v0.7.0/user-manuals/network/edge-ingress.md index 7ca8544351..0a2c882aaf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v0.7.0/user-manuals/network/edge-ingress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v0.7.0/user-manuals/network/edge-ingress.md @@ -34,9 +34,9 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I - name: pool01 - name: pool03 -默认为每个节点池创建的nginx ingress控制器副本数为1 -默认的ingress控制器docker image为:k8s.gcr.io/ingress-nginx/controller:v0.48.1 -默认的生成ingress控制器webhook证书的docker image为:k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.48.1 +默认为每个节点池创建的nginx ingress控制器副本数为1 +默认的ingress控制器docker image为:registry.k8s.io/ingress-nginx/controller:v0.48.1 +默认的生成ingress控制器webhook证书的docker image为:registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.48.1 1.2). 如果用户不想使用默认的配置,而是想对节点池做一些个性化配置,可以如下定义CR: @@ -46,15 +46,15 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: - xxx.xxx.xxx.xxx - name: pool03 -其中: +其中: `igress_controller_replicas_per_pool`/`ingress_controller_image`/`ingress_webhook_certgen_image`可供用户自定义相关默认配置, `ingress_ips` 代表如果用户想通过externalIPs的方式为某个特定的节点池对外暴露nginx ingress控制器服务的公网IP地址。 @@ -67,7 +67,7 @@ b). 在spec中,“ingress_controller_replicas_per_pool”表示部署在每个 c). 在spec中,“pools”表示要在其上开启ingress功能的节点池列表,目前支持节点池名及针对该节点池的ingress服务公网IP配置。 -2). 部署YurtIngress CR yaml文件: +2). 部署YurtIngress CR yaml文件: 假定CR文件名为yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -194,7 +194,7 @@ a). 由哪个节点池提供ingress功能是由ingress class决定的,因此 b). 不同K8S版本的ingress CR定义可能不同,您需要确保ingress CR的定义与集群K8S版本匹配。 -2). 部署ingress规则yaml文件: +2). 
部署ingress规则yaml文件: 假定yaml文件名为ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.0/user-manuals/network/edge-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.0/user-manuals/network/edge-ingress.md index 7ca8544351..0a2c882aaf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v1.0/user-manuals/network/edge-ingress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.0/user-manuals/network/edge-ingress.md @@ -34,9 +34,9 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I - name: pool01 - name: pool03 -默认为每个节点池创建的nginx ingress控制器副本数为1 -默认的ingress控制器docker image为:k8s.gcr.io/ingress-nginx/controller:v0.48.1 -默认的生成ingress控制器webhook证书的docker image为:k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.48.1 +默认为每个节点池创建的nginx ingress控制器副本数为1 +默认的ingress控制器docker image为:registry.k8s.io/ingress-nginx/controller:v0.48.1 +默认的生成ingress控制器webhook证书的docker image为:registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.48.1 1.2). 如果用户不想使用默认的配置,而是想对节点池做一些个性化配置,可以如下定义CR: @@ -46,15 +46,15 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: - xxx.xxx.xxx.xxx - name: pool03 -其中: +其中: `igress_controller_replicas_per_pool`/`ingress_controller_image`/`ingress_webhook_certgen_image`可供用户自定义相关默认配置, `ingress_ips` 代表如果用户想通过externalIPs的方式为某个特定的节点池对外暴露nginx ingress控制器服务的公网IP地址。 @@ -67,7 +67,7 @@ b). 在spec中,“ingress_controller_replicas_per_pool”表示部署在每个 c). 在spec中,“pools”表示要在其上开启ingress功能的节点池列表,目前支持节点池名及针对该节点池的ingress服务公网IP配置。 -2). 部署YurtIngress CR yaml文件: +2). 部署YurtIngress CR yaml文件: 假定CR文件名为yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -194,7 +194,7 @@ a). 由哪个节点池提供ingress功能是由ingress class决定的,因此 b). 不同K8S版本的ingress CR定义可能不同,您需要确保ingress CR的定义与集群K8S版本匹配。 -2). 部署ingress规则yaml文件: +2). 部署ingress规则yaml文件: 假定yaml文件名为ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.1/user-manuals/network/edge-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.1/user-manuals/network/edge-ingress.md index 7ca8544351..0a2c882aaf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v1.1/user-manuals/network/edge-ingress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.1/user-manuals/network/edge-ingress.md @@ -34,9 +34,9 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I - name: pool01 - name: pool03 -默认为每个节点池创建的nginx ingress控制器副本数为1 -默认的ingress控制器docker image为:k8s.gcr.io/ingress-nginx/controller:v0.48.1 -默认的生成ingress控制器webhook证书的docker image为:k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.48.1 +默认为每个节点池创建的nginx ingress控制器副本数为1 +默认的ingress控制器docker image为:registry.k8s.io/ingress-nginx/controller:v0.48.1 +默认的生成ingress控制器webhook证书的docker image为:registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.48.1 1.2). 
如果用户不想使用默认的配置,而是想对节点池做一些个性化配置,可以如下定义CR: @@ -46,15 +46,15 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: - xxx.xxx.xxx.xxx - name: pool03 -其中: +其中: `igress_controller_replicas_per_pool`/`ingress_controller_image`/`ingress_webhook_certgen_image`可供用户自定义相关默认配置, `ingress_ips` 代表如果用户想通过externalIPs的方式为某个特定的节点池对外暴露nginx ingress控制器服务的公网IP地址。 @@ -67,7 +67,7 @@ b). 在spec中,“ingress_controller_replicas_per_pool”表示部署在每个 c). 在spec中,“pools”表示要在其上开启ingress功能的节点池列表,目前支持节点池名及针对该节点池的ingress服务公网IP配置。 -2). 部署YurtIngress CR yaml文件: +2). 部署YurtIngress CR yaml文件: 假定CR文件名为yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -194,7 +194,7 @@ a). 由哪个节点池提供ingress功能是由ingress class决定的,因此 b). 不同K8S版本的ingress CR定义可能不同,您需要确保ingress CR的定义与集群K8S版本匹配。 -2). 部署ingress规则yaml文件: +2). 部署ingress规则yaml文件: 假定yaml文件名为ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.2/user-manuals/network/edge-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.2/user-manuals/network/edge-ingress.md index 7ca8544351..0a2c882aaf 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-v1.2/user-manuals/network/edge-ingress.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.2/user-manuals/network/edge-ingress.md @@ -34,9 +34,9 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I - name: pool01 - name: pool03 -默认为每个节点池创建的nginx ingress控制器副本数为1 -默认的ingress控制器docker image为:k8s.gcr.io/ingress-nginx/controller:v0.48.1 -默认的生成ingress控制器webhook证书的docker image为:k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.48.1 +默认为每个节点池创建的nginx ingress控制器副本数为1 +默认的ingress控制器docker image为:registry.k8s.io/ingress-nginx/controller:v0.48.1 +默认的生成ingress控制器webhook证书的docker image为:registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.48.1 1.2). 如果用户不想使用默认的配置,而是想对节点池做一些个性化配置,可以如下定义CR: @@ -46,15 +46,15 @@ YurtIngress opeator负责将nginx ingress controller编排到需要启用边缘I name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: - xxx.xxx.xxx.xxx - name: pool03 -其中: +其中: `igress_controller_replicas_per_pool`/`ingress_controller_image`/`ingress_webhook_certgen_image`可供用户自定义相关默认配置, `ingress_ips` 代表如果用户想通过externalIPs的方式为某个特定的节点池对外暴露nginx ingress控制器服务的公网IP地址。 @@ -67,7 +67,7 @@ b). 在spec中,“ingress_controller_replicas_per_pool”表示部署在每个 c). 在spec中,“pools”表示要在其上开启ingress功能的节点池列表,目前支持节点池名及针对该节点池的ingress服务公网IP配置。 -2). 部署YurtIngress CR yaml文件: +2). 部署YurtIngress CR yaml文件: 假定CR文件名为yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -194,7 +194,7 @@ a). 由哪个节点池提供ingress功能是由ingress class决定的,因此 b). 不同K8S版本的ingress CR定义可能不同,您需要确保ingress CR的定义与集群K8S版本匹配。 -2). 部署ingress规则yaml文件: +2). 
部署ingress规则yaml文件: 假定yaml文件名为ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/versioned_docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md b/versioned_docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md index c0578e1aa7..d0225ce288 100644 --- a/versioned_docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md +++ b/versioned_docs/version-v0.5.0/developer-manuals/how-to-build-and-test.md @@ -21,8 +21,8 @@ Many approaches of building OpenYurt have been provided in the [Makefile](https: make release WHAT="${components[@]}" ARCH="${archs[@]}" REGION="${your_region}" ``` -`WHAT` represents components that you want to build, as mentioned at the beginning of the artical. -`ARCH` represents archtectures of target platforms, including `amd64`, `arm` and `arm64`. +`WHAT` represents components that you want to build, as mentioned at the beginning of the artical. +`ARCH` represents archtectures of target platforms, including `amd64`, `arm` and `arm64`. `REGION` will affect the `GOPROXY` when compiling the code. Currently, `cn` and `us` are supported, representing using `GOPROXY=https://goproxy.cn` and `GOPROXY=https://proxy.golang.org` respectively. Its default value is `us`. It's recommanded that developers in China should always set `REGION=cn` explicitly to ensure the successful build. eg. @@ -64,7 +64,7 @@ Because there's no make command on Windows(if you don't have Cygwin), we have to $Env:GOOS = $Env:target_os $Env:GOARCH = $Env:target_arch $Env:CGO_ENABLED = 0 -$Env:GOLDFLAGS = "-s -w +$Env:GOLDFLAGS = "-s -w -X github.com/openyurtio/openyurt/pkg/projectinfo.projectPrefix=yurt -X github.com/openyurtio/openyurt/pkg/projectinfo.labelPrefix=openyurt.io -X github.com/openyurtio/openyurt/pkg/projectinfo.gitVersion=$(git describe --abbrev=0) @@ -81,7 +81,7 @@ go build -ldflags=$Env:GOLDFLAGS cmd/yurtctl/yurtctl.go ### Build images manually -In this section, we can find the dockerfile for each component. It will help you use `docker build` to build images. Here's the table giving base images `yurtctl` and `yurt-node-servant` will use. +In this section, we can find the dockerfile for each component. It will help you use `docker build` to build images. Here's the table giving base images `yurtctl` and `yurt-node-servant` will use. | Arch | Base Image | | ----- | ------------------ | @@ -110,7 +110,7 @@ ADD yurt-node-servant /usr/local/bin/node-servant Other components use the different base image. We use `${arch}` to represent the target arch(including amd64, arm and arm64), `${component}` to represent the component to built(as mentioned at the beginning of this artical). Then the dockerfile is as follows: ```dockerfile -FROM k8s.gcr.io/debian-iptables-${arch}:v11.0.2 +FROM registry.k8s.io/debian-iptables-${arch}:v11.0.2 COPY ${component} /usr/local/bin/${component} ENTRYPOINT ["/usr/local/bin/${component}"] ``` @@ -177,18 +177,18 @@ $ ./_output/bin/linux/amd64/yurt-e2e-test --kubeconfig=$HOME/.kube/config --rep 3) If you want to test yurt node autonomy on aliyun ecs or aliyun ens with binary of yurt-e2e-test, TBD. -Note: +Note: The path of yurt-e2e-test binary depends on the platform of your local host. Finally, you can check test result in stdout or in file yurt-e2e-test-report_01.xml(specified by the `--report-dir` option). ## Troubleshooting -1. "go: github.com...unknown revision xxx" occurs during build +1. "go: github.com...unknown revision xxx" occurs during build It's often caused for too low git version on your host. You can try to update git. 
-2. "unsupported GOOS/GOARCH pair xxx/xxx" occurs during compilation +2. "unsupported GOOS/GOARCH pair xxx/xxx" occurs during compilation Not all GOOS/GOARCH pairs are supported by go, such as go1.17.3 cannot support windows/arm64. You can check all supported pairs through `go tool dist list`. -3. "cannot execute binary file: Exec format error" occurs when running binaries built on other platform. +3. "cannot execute binary file: Exec format error" occurs when running binaries built on other platform. It's often caused by an unsuccessful cross compilation, and the OS cannot recoginze the file format. When you run cross compilation on Windows, please ensure that you run it as an administractor. diff --git a/versioned_docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md b/versioned_docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md index c0578e1aa7..d0225ce288 100644 --- a/versioned_docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md +++ b/versioned_docs/version-v0.6.0/developer-manuals/how-to-build-and-test.md @@ -21,8 +21,8 @@ Many approaches of building OpenYurt have been provided in the [Makefile](https: make release WHAT="${components[@]}" ARCH="${archs[@]}" REGION="${your_region}" ``` -`WHAT` represents components that you want to build, as mentioned at the beginning of the artical. -`ARCH` represents archtectures of target platforms, including `amd64`, `arm` and `arm64`. +`WHAT` represents components that you want to build, as mentioned at the beginning of the artical. +`ARCH` represents archtectures of target platforms, including `amd64`, `arm` and `arm64`. `REGION` will affect the `GOPROXY` when compiling the code. Currently, `cn` and `us` are supported, representing using `GOPROXY=https://goproxy.cn` and `GOPROXY=https://proxy.golang.org` respectively. Its default value is `us`. It's recommanded that developers in China should always set `REGION=cn` explicitly to ensure the successful build. eg. @@ -64,7 +64,7 @@ Because there's no make command on Windows(if you don't have Cygwin), we have to $Env:GOOS = $Env:target_os $Env:GOARCH = $Env:target_arch $Env:CGO_ENABLED = 0 -$Env:GOLDFLAGS = "-s -w +$Env:GOLDFLAGS = "-s -w -X github.com/openyurtio/openyurt/pkg/projectinfo.projectPrefix=yurt -X github.com/openyurtio/openyurt/pkg/projectinfo.labelPrefix=openyurt.io -X github.com/openyurtio/openyurt/pkg/projectinfo.gitVersion=$(git describe --abbrev=0) @@ -81,7 +81,7 @@ go build -ldflags=$Env:GOLDFLAGS cmd/yurtctl/yurtctl.go ### Build images manually -In this section, we can find the dockerfile for each component. It will help you use `docker build` to build images. Here's the table giving base images `yurtctl` and `yurt-node-servant` will use. +In this section, we can find the dockerfile for each component. It will help you use `docker build` to build images. Here's the table giving base images `yurtctl` and `yurt-node-servant` will use. | Arch | Base Image | | ----- | ------------------ | @@ -110,7 +110,7 @@ ADD yurt-node-servant /usr/local/bin/node-servant Other components use the different base image. We use `${arch}` to represent the target arch(including amd64, arm and arm64), `${component}` to represent the component to built(as mentioned at the beginning of this artical). 
Then the dockerfile is as follows: ```dockerfile -FROM k8s.gcr.io/debian-iptables-${arch}:v11.0.2 +FROM registry.k8s.io/debian-iptables-${arch}:v11.0.2 COPY ${component} /usr/local/bin/${component} ENTRYPOINT ["/usr/local/bin/${component}"] ``` @@ -177,18 +177,18 @@ $ ./_output/bin/linux/amd64/yurt-e2e-test --kubeconfig=$HOME/.kube/config --rep 3) If you want to test yurt node autonomy on aliyun ecs or aliyun ens with binary of yurt-e2e-test, TBD. -Note: +Note: The path of yurt-e2e-test binary depends on the platform of your local host. Finally, you can check test result in stdout or in file yurt-e2e-test-report_01.xml(specified by the `--report-dir` option). ## Troubleshooting -1. "go: github.com...unknown revision xxx" occurs during build +1. "go: github.com...unknown revision xxx" occurs during build It's often caused for too low git version on your host. You can try to update git. -2. "unsupported GOOS/GOARCH pair xxx/xxx" occurs during compilation +2. "unsupported GOOS/GOARCH pair xxx/xxx" occurs during compilation Not all GOOS/GOARCH pairs are supported by go, such as go1.17.3 cannot support windows/arm64. You can check all supported pairs through `go tool dist list`. -3. "cannot execute binary file: Exec format error" occurs when running binaries built on other platform. +3. "cannot execute binary file: Exec format error" occurs when running binaries built on other platform. It's often caused by an unsuccessful cross compilation, and the OS cannot recoginze the file format. When you run cross compilation on Windows, please ensure that you run it as an administractor. diff --git a/versioned_docs/version-v0.6.0/user-manuals/network/edge-ingress.md b/versioned_docs/version-v0.6.0/user-manuals/network/edge-ingress.md index 7bea6fda56..acf3b66490 100644 --- a/versioned_docs/version-v0.6.0/user-manuals/network/edge-ingress.md +++ b/versioned_docs/version-v0.6.0/user-manuals/network/edge-ingress.md @@ -20,7 +20,7 @@ YurtIngress operator is responsible for orchestrating multi ingress controllers Suppose you have created 4 NodePools in your OpenYurt cluster: pool01, pool02, pool03, pool04, and you want to enable edge ingress feature on pool01 and pool03, you can create the YurtIngress CR as below: -1). Create the YurtIngress CR yaml file: +1). Create the YurtIngress CR yaml file: 1.1). A simple CR definition with some default configurations: @@ -33,8 +33,8 @@ enable edge ingress feature on pool01 and pool03, you can create the YurtIngress - name: pool01 - name: pool03 -The default nginx ingress controller replicas per pool is 1. -The default nginx ingress controller image is controller:v0.48.1 from dockerhub. +The default nginx ingress controller replicas per pool is 1. +The default nginx ingress controller image is controller:v0.48.1 from dockerhub. The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 from dockerhub. 1.2). 
If users want to make personalized configurations about the default options, the YurtIngress CR can be defined as below: @@ -45,8 +45,8 @@ The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: @@ -67,7 +67,7 @@ c). In spec, the "pools" represents the pools list on which you want to enable i Currently it supports the pool name and the nginx ingress controller service externalIPs. -2). Apply the YurtIngress CR yaml file: +2). Apply the YurtIngress CR yaml file: Assume the file name is yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -172,7 +172,7 @@ Suppose your app workload is deployed to several NodePools and it exposes a glob If you want to access the service provided by pool01: -1). Create the ingress rule yaml file: +1). Create the ingress rule yaml file: apiVersion: extensions/v1beta1 kind: Ingress @@ -197,7 +197,7 @@ a). Ingress class decides which NodePool to provide the ingress capability, so y b). The ingress CR definition may be different for different K8S versions, so you need ensure the CR definition matches with your cluster K8S version. -2). Apply the ingress rule yaml file: +2). Apply the ingress rule yaml file: Assume the file name is ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/versioned_docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md b/versioned_docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md index 19c8508ff1..0538126d03 100644 --- a/versioned_docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md +++ b/versioned_docs/version-v0.6.0/user-manuals/workload/yurt-app-daemon.md @@ -238,7 +238,7 @@ spec: - args: - -conf - /etc/coredns/Corefile - image: k8s.gcr.io/coredns:1.6.7 + image: registry.k8s.io/coredns:1.6.7 imagePullPolicy: IfNotPresent name: coredns resources: @@ -254,7 +254,7 @@ spec: - NET_BIND_SERVICE drop: - all - readOnlyRootFilesystem: true + readOnlyRootFilesystem: true livenessProbe: failureThreshold: 5 httpGet: @@ -264,7 +264,7 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 5 + timeoutSeconds: 5 volumeMounts: - mountPath: /etc/coredns name: config-volume @@ -305,7 +305,7 @@ spec: selector: k8s-app: kube-dns sessionAffinity: None - type: ClusterIP + type: ClusterIP --- apiVersion: v1 data: @@ -387,5 +387,4 @@ subjects: EOF -``` - +``` diff --git a/versioned_docs/version-v0.7.0/user-manuals/network/edge-ingress.md b/versioned_docs/version-v0.7.0/user-manuals/network/edge-ingress.md index 7bea6fda56..acf3b66490 100644 --- a/versioned_docs/version-v0.7.0/user-manuals/network/edge-ingress.md +++ b/versioned_docs/version-v0.7.0/user-manuals/network/edge-ingress.md @@ -20,7 +20,7 @@ YurtIngress operator is responsible for orchestrating multi ingress controllers Suppose you have created 4 NodePools in your OpenYurt cluster: pool01, pool02, pool03, pool04, and you want to enable edge ingress feature on pool01 and pool03, you can create the YurtIngress CR as below: -1). Create the YurtIngress CR yaml file: +1). Create the YurtIngress CR yaml file: 1.1). 
A simple CR definition with some default configurations: @@ -33,8 +33,8 @@ enable edge ingress feature on pool01 and pool03, you can create the YurtIngress - name: pool01 - name: pool03 -The default nginx ingress controller replicas per pool is 1. -The default nginx ingress controller image is controller:v0.48.1 from dockerhub. +The default nginx ingress controller replicas per pool is 1. +The default nginx ingress controller image is controller:v0.48.1 from dockerhub. The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 from dockerhub. 1.2). If users want to make personalized configurations about the default options, the YurtIngress CR can be defined as below: @@ -45,8 +45,8 @@ The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: @@ -67,7 +67,7 @@ c). In spec, the "pools" represents the pools list on which you want to enable i Currently it supports the pool name and the nginx ingress controller service externalIPs. -2). Apply the YurtIngress CR yaml file: +2). Apply the YurtIngress CR yaml file: Assume the file name is yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -172,7 +172,7 @@ Suppose your app workload is deployed to several NodePools and it exposes a glob If you want to access the service provided by pool01: -1). Create the ingress rule yaml file: +1). Create the ingress rule yaml file: apiVersion: extensions/v1beta1 kind: Ingress @@ -197,7 +197,7 @@ a). Ingress class decides which NodePool to provide the ingress capability, so y b). The ingress CR definition may be different for different K8S versions, so you need ensure the CR definition matches with your cluster K8S version. -2). Apply the ingress rule yaml file: +2). 
Apply the ingress rule yaml file: Assume the file name is ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/versioned_docs/version-v0.7.0/user-manuals/workload/yurt-app-daemon.md b/versioned_docs/version-v0.7.0/user-manuals/workload/yurt-app-daemon.md index fe8fc5036a..1cefb4ab1f 100644 --- a/versioned_docs/version-v0.7.0/user-manuals/workload/yurt-app-daemon.md +++ b/versioned_docs/version-v0.7.0/user-manuals/workload/yurt-app-daemon.md @@ -235,7 +235,7 @@ spec: - args: - -conf - /etc/coredns/Corefile - image: k8s.gcr.io/coredns:1.6.7 + image: registry.k8s.io/coredns:1.6.7 imagePullPolicy: IfNotPresent name: coredns resources: @@ -251,7 +251,7 @@ spec: - NET_BIND_SERVICE drop: - all - readOnlyRootFilesystem: true + readOnlyRootFilesystem: true livenessProbe: failureThreshold: 5 httpGet: @@ -261,7 +261,7 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 5 + timeoutSeconds: 5 volumeMounts: - mountPath: /etc/coredns name: config-volume @@ -302,7 +302,7 @@ spec: selector: k8s-app: kube-dns sessionAffinity: None - type: ClusterIP + type: ClusterIP --- apiVersion: v1 data: @@ -384,4 +384,4 @@ subjects: EOF -``` +``` diff --git a/versioned_docs/version-v1.0/user-manuals/network/edge-ingress.md b/versioned_docs/version-v1.0/user-manuals/network/edge-ingress.md index 7bea6fda56..acf3b66490 100644 --- a/versioned_docs/version-v1.0/user-manuals/network/edge-ingress.md +++ b/versioned_docs/version-v1.0/user-manuals/network/edge-ingress.md @@ -20,7 +20,7 @@ YurtIngress operator is responsible for orchestrating multi ingress controllers Suppose you have created 4 NodePools in your OpenYurt cluster: pool01, pool02, pool03, pool04, and you want to enable edge ingress feature on pool01 and pool03, you can create the YurtIngress CR as below: -1). Create the YurtIngress CR yaml file: +1). Create the YurtIngress CR yaml file: 1.1). A simple CR definition with some default configurations: @@ -33,8 +33,8 @@ enable edge ingress feature on pool01 and pool03, you can create the YurtIngress - name: pool01 - name: pool03 -The default nginx ingress controller replicas per pool is 1. -The default nginx ingress controller image is controller:v0.48.1 from dockerhub. +The default nginx ingress controller replicas per pool is 1. +The default nginx ingress controller image is controller:v0.48.1 from dockerhub. The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 from dockerhub. 1.2). If users want to make personalized configurations about the default options, the YurtIngress CR can be defined as below: @@ -45,8 +45,8 @@ The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: @@ -67,7 +67,7 @@ c). In spec, the "pools" represents the pools list on which you want to enable i Currently it supports the pool name and the nginx ingress controller service externalIPs. -2). Apply the YurtIngress CR yaml file: +2). 
Apply the YurtIngress CR yaml file: Assume the file name is yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -172,7 +172,7 @@ Suppose your app workload is deployed to several NodePools and it exposes a glob If you want to access the service provided by pool01: -1). Create the ingress rule yaml file: +1). Create the ingress rule yaml file: apiVersion: extensions/v1beta1 kind: Ingress @@ -197,7 +197,7 @@ a). Ingress class decides which NodePool to provide the ingress capability, so y b). The ingress CR definition may be different for different K8S versions, so you need ensure the CR definition matches with your cluster K8S version. -2). Apply the ingress rule yaml file: +2). Apply the ingress rule yaml file: Assume the file name is ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/versioned_docs/version-v1.0/user-manuals/workload/yurt-app-daemon.md b/versioned_docs/version-v1.0/user-manuals/workload/yurt-app-daemon.md index fe8fc5036a..1cefb4ab1f 100644 --- a/versioned_docs/version-v1.0/user-manuals/workload/yurt-app-daemon.md +++ b/versioned_docs/version-v1.0/user-manuals/workload/yurt-app-daemon.md @@ -235,7 +235,7 @@ spec: - args: - -conf - /etc/coredns/Corefile - image: k8s.gcr.io/coredns:1.6.7 + image: registry.k8s.io/coredns:1.6.7 imagePullPolicy: IfNotPresent name: coredns resources: @@ -251,7 +251,7 @@ spec: - NET_BIND_SERVICE drop: - all - readOnlyRootFilesystem: true + readOnlyRootFilesystem: true livenessProbe: failureThreshold: 5 httpGet: @@ -261,7 +261,7 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 5 + timeoutSeconds: 5 volumeMounts: - mountPath: /etc/coredns name: config-volume @@ -302,7 +302,7 @@ spec: selector: k8s-app: kube-dns sessionAffinity: None - type: ClusterIP + type: ClusterIP --- apiVersion: v1 data: @@ -384,4 +384,4 @@ subjects: EOF -``` +``` diff --git a/versioned_docs/version-v1.1/user-manuals/network/edge-ingress.md b/versioned_docs/version-v1.1/user-manuals/network/edge-ingress.md index 7bea6fda56..acf3b66490 100644 --- a/versioned_docs/version-v1.1/user-manuals/network/edge-ingress.md +++ b/versioned_docs/version-v1.1/user-manuals/network/edge-ingress.md @@ -20,7 +20,7 @@ YurtIngress operator is responsible for orchestrating multi ingress controllers Suppose you have created 4 NodePools in your OpenYurt cluster: pool01, pool02, pool03, pool04, and you want to enable edge ingress feature on pool01 and pool03, you can create the YurtIngress CR as below: -1). Create the YurtIngress CR yaml file: +1). Create the YurtIngress CR yaml file: 1.1). A simple CR definition with some default configurations: @@ -33,8 +33,8 @@ enable edge ingress feature on pool01 and pool03, you can create the YurtIngress - name: pool01 - name: pool03 -The default nginx ingress controller replicas per pool is 1. -The default nginx ingress controller image is controller:v0.48.1 from dockerhub. +The default nginx ingress controller replicas per pool is 1. +The default nginx ingress controller image is controller:v0.48.1 from dockerhub. The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 from dockerhub. 1.2). 
If users want to make personalized configurations about the default options, the YurtIngress CR can be defined as below: @@ -45,8 +45,8 @@ The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: @@ -67,7 +67,7 @@ c). In spec, the "pools" represents the pools list on which you want to enable i Currently it supports the pool name and the nginx ingress controller service externalIPs. -2). Apply the YurtIngress CR yaml file: +2). Apply the YurtIngress CR yaml file: Assume the file name is yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -172,7 +172,7 @@ Suppose your app workload is deployed to several NodePools and it exposes a glob If you want to access the service provided by pool01: -1). Create the ingress rule yaml file: +1). Create the ingress rule yaml file: apiVersion: extensions/v1beta1 kind: Ingress @@ -197,7 +197,7 @@ a). Ingress class decides which NodePool to provide the ingress capability, so y b). The ingress CR definition may be different for different K8S versions, so you need ensure the CR definition matches with your cluster K8S version. -2). Apply the ingress rule yaml file: +2). Apply the ingress rule yaml file: Assume the file name is ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/versioned_docs/version-v1.1/user-manuals/workload/yurt-app-daemon.md b/versioned_docs/version-v1.1/user-manuals/workload/yurt-app-daemon.md index fe8fc5036a..1cefb4ab1f 100644 --- a/versioned_docs/version-v1.1/user-manuals/workload/yurt-app-daemon.md +++ b/versioned_docs/version-v1.1/user-manuals/workload/yurt-app-daemon.md @@ -235,7 +235,7 @@ spec: - args: - -conf - /etc/coredns/Corefile - image: k8s.gcr.io/coredns:1.6.7 + image: registry.k8s.io/coredns:1.6.7 imagePullPolicy: IfNotPresent name: coredns resources: @@ -251,7 +251,7 @@ spec: - NET_BIND_SERVICE drop: - all - readOnlyRootFilesystem: true + readOnlyRootFilesystem: true livenessProbe: failureThreshold: 5 httpGet: @@ -261,7 +261,7 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 5 + timeoutSeconds: 5 volumeMounts: - mountPath: /etc/coredns name: config-volume @@ -302,7 +302,7 @@ spec: selector: k8s-app: kube-dns sessionAffinity: None - type: ClusterIP + type: ClusterIP --- apiVersion: v1 data: @@ -384,4 +384,4 @@ subjects: EOF -``` +``` diff --git a/versioned_docs/version-v1.2/user-manuals/network/edge-ingress.md b/versioned_docs/version-v1.2/user-manuals/network/edge-ingress.md index 7bea6fda56..acf3b66490 100644 --- a/versioned_docs/version-v1.2/user-manuals/network/edge-ingress.md +++ b/versioned_docs/version-v1.2/user-manuals/network/edge-ingress.md @@ -20,7 +20,7 @@ YurtIngress operator is responsible for orchestrating multi ingress controllers Suppose you have created 4 NodePools in your OpenYurt cluster: pool01, pool02, pool03, pool04, and you want to enable edge ingress feature on pool01 and pool03, you can create the YurtIngress CR as below: -1). Create the YurtIngress CR yaml file: +1). Create the YurtIngress CR yaml file: 1.1). 
A simple CR definition with some default configurations: @@ -33,8 +33,8 @@ enable edge ingress feature on pool01 and pool03, you can create the YurtIngress - name: pool01 - name: pool03 -The default nginx ingress controller replicas per pool is 1. -The default nginx ingress controller image is controller:v0.48.1 from dockerhub. +The default nginx ingress controller replicas per pool is 1. +The default nginx ingress controller image is controller:v0.48.1 from dockerhub. The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 from dockerhub. 1.2). If users want to make personalized configurations about the default options, the YurtIngress CR can be defined as below: @@ -45,8 +45,8 @@ The default nginx ingress webhook certgen image is kube-webhook-certgen:v0.48.1 name: yurtingress-test spec: ingress_controller_replicas_per_pool: 2 - ingress_controller_image: k8s.gcr.io/ingress-nginx/controller:v0.49.0 - ingress_webhook_certgen_image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v0.49.0 + ingress_controller_image: registry.k8s.io/ingress-nginx/controller:v0.49.0 + ingress_webhook_certgen_image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 pools: - name: pool01 ingress_ips: @@ -67,7 +67,7 @@ c). In spec, the "pools" represents the pools list on which you want to enable i Currently it supports the pool name and the nginx ingress controller service externalIPs. -2). Apply the YurtIngress CR yaml file: +2). Apply the YurtIngress CR yaml file: Assume the file name is yurtingress-test.yaml: #kubectl apply -f yurtingress-test.yaml @@ -172,7 +172,7 @@ Suppose your app workload is deployed to several NodePools and it exposes a glob If you want to access the service provided by pool01: -1). Create the ingress rule yaml file: +1). Create the ingress rule yaml file: apiVersion: extensions/v1beta1 kind: Ingress @@ -197,7 +197,7 @@ a). Ingress class decides which NodePool to provide the ingress capability, so y b). The ingress CR definition may be different for different K8S versions, so you need ensure the CR definition matches with your cluster K8S version. -2). Apply the ingress rule yaml file: +2). Apply the ingress rule yaml file: Assume the file name is ingress-myapp.yaml: #kubectl apply -f ingress-myapp.yaml diff --git a/versioned_docs/version-v1.2/user-manuals/workload/yurt-app-daemon.md b/versioned_docs/version-v1.2/user-manuals/workload/yurt-app-daemon.md index fe8fc5036a..1cefb4ab1f 100644 --- a/versioned_docs/version-v1.2/user-manuals/workload/yurt-app-daemon.md +++ b/versioned_docs/version-v1.2/user-manuals/workload/yurt-app-daemon.md @@ -235,7 +235,7 @@ spec: - args: - -conf - /etc/coredns/Corefile - image: k8s.gcr.io/coredns:1.6.7 + image: registry.k8s.io/coredns:1.6.7 imagePullPolicy: IfNotPresent name: coredns resources: @@ -251,7 +251,7 @@ spec: - NET_BIND_SERVICE drop: - all - readOnlyRootFilesystem: true + readOnlyRootFilesystem: true livenessProbe: failureThreshold: 5 httpGet: @@ -261,7 +261,7 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 5 + timeoutSeconds: 5 volumeMounts: - mountPath: /etc/coredns name: config-volume @@ -302,7 +302,7 @@ spec: selector: k8s-app: kube-dns sessionAffinity: None - type: ClusterIP + type: ClusterIP --- apiVersion: v1 data: @@ -384,4 +384,4 @@ subjects: EOF -``` +```
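Since this migration edits the same image references across 23 files by hand, a repo-wide audit is a cheap way to catch any copies the patch missed. Below is a minimal sketch, assuming GNU grep/sed and the directory layout shown in the diffstat above (docs/, i18n/, versioned_docs/):

```bash
# List any lingering references to the deprecated registry in the markdown docs.
grep -rn --include='*.md' 'k8s\.gcr\.io' docs/ i18n/ versioned_docs/ \
  || echo "no stale k8s.gcr.io references found"

# Rewrite any stragglers in place (GNU sed); review `git diff` before committing.
find docs i18n versioned_docs -name '*.md' \
  -exec sed -i 's|k8s\.gcr\.io|registry.k8s.io|g' {} +
```

The repository paths are unchanged by the migration (registry.k8s.io serves the same image repositories that k8s.gcr.io did), so a plain host substitution is sufficient here.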
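It may also be worth spot-checking that the renamed references the docs now carry actually resolve before edge clusters try to pull them. A minimal sketch, assuming a Docker CLI with network access to the registry; the tags are the ones used in the updated docs:

```bash
# Verify that each image referenced in the updated docs exists under registry.k8s.io.
for img in \
    registry.k8s.io/ingress-nginx/controller:v0.49.0 \
    registry.k8s.io/ingress-nginx/kube-webhook-certgen:v0.49.0 \
    registry.k8s.io/coredns:1.6.7 \
    registry.k8s.io/debian-iptables-amd64:v11.0.2; do
  docker manifest inspect "$img" > /dev/null \
    && echo "ok: $img" \
    || echo "MISSING: $img"
done
```

Nodes that could pull from k8s.gcr.io should be able to reach registry.k8s.io as well, but a failed lookup here is cheaper to catch than a failed image pull on an edge node.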