diff --git a/.gitignore b/.gitignore index 5654b91d9..112c4f5f6 100644 --- a/.gitignore +++ b/.gitignore @@ -34,4 +34,6 @@ ignore_dir cmd/kubenest/node-agent/app.log cmd/kubenest/node-agent/cert.pem cmd/kubenest/node-agent/key.pem -cmd/kubenest/node-agent/agent.env \ No newline at end of file +cmd/kubenest/node-agent/agent.env +hack/k8s-in-k8s/nodes.txt +develop \ No newline at end of file diff --git a/deploy/crds/kosmos.io_kubenestconfigurations.yaml b/deploy/crds/kosmos.io_kubenestconfigurations.yaml index d473bde05..a5b93063e 100644 --- a/deploy/crds/kosmos.io_kubenestconfigurations.yaml +++ b/deploy/crds/kosmos.io_kubenestconfigurations.yaml @@ -45,6 +45,12 @@ spec: type: string apiServerReplicas: type: integer + apiServerServiceType: + default: hostNetwork + enum: + - nodePort + - hostNetwork + type: string clusterCIDR: type: string etcdStorageClass: diff --git a/deploy/crds/kosmos.io_virtualclusters.yaml b/deploy/crds/kosmos.io_virtualclusters.yaml index 22e0ab688..aa7ff57c9 100644 --- a/deploy/crds/kosmos.io_virtualclusters.yaml +++ b/deploy/crds/kosmos.io_virtualclusters.yaml @@ -62,6 +62,12 @@ spec: type: string apiServerReplicas: type: integer + apiServerServiceType: + default: hostNetwork + enum: + - nodePort + - hostNetwork + type: string clusterCIDR: type: string etcdStorageClass: diff --git a/deploy/virtual-cluster-operator.yml b/deploy/virtual-cluster-operator.yml index 28e0a94c2..cd8a6de39 100644 --- a/deploy/virtual-cluster-operator.yml +++ b/deploy/virtual-cluster-operator.yml @@ -252,45 +252,3 @@ data: username: {{ .USERNAME }} password: {{ .PASSWORD }} --- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kosmos-hostports - namespace: kosmos-system -data: - config.yaml: | - # ports allocate for virtual cluster api server,from 33001, increment by 1 for each virtual cluster.Be careful not to use ports that are already in use - portsPool: - - 33001 - - 33002 - - 33003 - - 33004 - - 33005 - - 33006 - - 33007 - - 33008 - - 33009 - - 33010 ---- 
-apiVersion: v1 -data: - egress_selector_configuration.yaml: | - apiVersion: apiserver.k8s.io/v1beta1 - kind: EgressSelectorConfiguration - egressSelections: - - name: cluster - connection: - proxyProtocol: GRPC - transport: - uds: - udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket - - name: master - connection: - proxyProtocol: Direct - - name: etcd - connection: - proxyProtocol: Direct -kind: ConfigMap -metadata: - name: kas-proxy-files - namespace: kas-proxy diff --git a/hack/k8s-in-k8s/g.env.sh b/hack/k8s-in-k8s/g.env.sh new file mode 100644 index 000000000..8431347b2 --- /dev/null +++ b/hack/k8s-in-k8s/g.env.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +# ##### +# Generate by script generate_env.sh +# ##### + +SCRIPT_VERSION=0.0.1 +# tmp dir of kosmos +PATH_FILE_TMP=/apps/conf/kosmos/tmp +################################################## +# path for kubeadm config +PATH_KUBEADM_CONFIG=/etc/kubeadm +################################################## +# path for kubernetes, from kubelet args --config +PATH_KUBERNETES=/etc/kubernetes +PATH_KUBERNETES_PKI=/etc/kubernetes/pki +# name for kubelet kubeconfig file +KUBELET_KUBE_CONFIG_NAME= +################################################## +# path for kubelet +PATH_KUBELET_LIB=/var/lib/kubelet +# path for kubelet +PATH_KUBELET_CONF=. 
+# name for config file of kubelet +KUBELET_CONFIG_NAME= +HOST_CORE_DNS=10.96.0.10 +# kubeadm switch +USE_KUBEADM=true +# Generate kubelet.conf TIMEOUT +KUBELET_CONF_TIMEOUT=30 + +function GenerateKubeadmConfig() { + echo "--- +apiVersion: kubeadm.k8s.io/v1beta2 +caCertPath: /etc/kubernetes/pki/ca.crt +discovery: + bootstrapToken: + apiServerEndpoint: apiserver.cluster.local:6443 + token: $1 + unsafeSkipCAVerification: true +kind: JoinConfiguration +nodeRegistration: + criSocket: /run/containerd/containerd.sock + kubeletExtraArgs: + container-runtime: remote + container-runtime-endpoint: unix:///run/containerd/containerd.sock + taints: null" > $2/kubeadm.cfg.current +} + +function GenerateStaticNginxProxy() { + echo "apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: nginx-proxy + namespace: kube-system +spec: + containers: + - image: registry.paas/cmss/nginx:1.21.4 + imagePullPolicy: IfNotPresent + name: nginx-proxy + resources: + limits: + cpu: 300m + memory: 512M + requests: + cpu: 25m + memory: 32M + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/nginx + name: etc-nginx + readOnly: true + hostNetwork: true + priorityClassName: system-node-critical + volumes: + - hostPath: + path: /apps/conf/nginx + type: + name: etc-nginx +status: {}" > /etc/kubernetes/manifests/nginx-proxy.yaml +} + + diff --git a/hack/k8s-in-k8s/generate_env.sh b/hack/k8s-in-k8s/generate_env.sh index 9b9cd0d1a..02eb5cc38 100644 --- a/hack/k8s-in-k8s/generate_env.sh +++ b/hack/k8s-in-k8s/generate_env.sh @@ -124,7 +124,7 @@ PATH_KUBELET_CONF=$PATH_KUBELET_CONF KUBELET_CONFIG_NAME=$KUBELET_CONFIG_NAME HOST_CORE_DNS=$HOST_CORE_DNS # kubeadm switch -USE_KUBEADM=true +USE_KUBEADM=false # Generate kubelet.conf TIMEOUT KUBELET_CONF_TIMEOUT=30 diff --git a/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go b/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go index ab339432f..0ba3d5f5b 100644 --- 
a/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go +++ b/pkg/apis/kosmos/v1alpha1/kubenestconfiguration_types.go @@ -11,6 +11,13 @@ const ( KosmosKube KubeNestType = "Kosmos in kube" ) +type ApiServerServiceType string + +const ( + HostNetwork ApiServerServiceType = "hostNetwork" + NodePort ApiServerServiceType = "nodePort" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KubeNestConfiguration defines the configuration for KubeNest @@ -73,6 +80,11 @@ type KubeInKubeConfig struct { //TenantEntrypoint TenantEntrypoint `yaml:"tenantEntrypoint" json:"tenantEntrypoint,omitempty"` // +optional TenantEntrypoint TenantEntrypoint `yaml:"tenantEntrypoint" json:"tenantEntrypoint,omitempty"` + + // +kubebuilder:validation:Enum=nodePort;hostNetwork + // +kubebuilder:default=hostNetwork + // +optional + ApiServerServiceType ApiServerServiceType `yaml:"apiServerServiceType" json:"apiServerServiceType,omitempty"` } // TenantEntrypoint contains the configuration for the tenant entrypoint. 
diff --git a/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go b/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go index e0276f5b4..24f2c8331 100644 --- a/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go +++ b/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go @@ -58,7 +58,7 @@ type VirtualClusterSpec struct { // KubeInKubeConfig is the external config of virtual cluster // +optional - KubeInKubeConfig KubeInKubeConfig `json:"kubeInKubeConfig,omitempty"` + KubeInKubeConfig *KubeInKubeConfig `json:"kubeInKubeConfig,omitempty"` // PromotePolicies definites the policies for promote to the kubernetes's control plane // +required PromotePolicies []PromotePolicy `json:"promotePolicies,omitempty"` diff --git a/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go index fe5679c60..d10041a7c 100644 --- a/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/kosmos/v1alpha1/zz_generated.deepcopy.go @@ -1965,7 +1965,11 @@ func (in *VirtualClusterSpec) DeepCopyInto(out *VirtualClusterSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - in.KubeInKubeConfig.DeepCopyInto(&out.KubeInKubeConfig) + if in.KubeInKubeConfig != nil { + in, out := &in.KubeInKubeConfig, &out.KubeInKubeConfig + *out = new(KubeInKubeConfig) + (*in).DeepCopyInto(*out) + } if in.PromotePolicies != nil { in, out := &in.PromotePolicies, &out.PromotePolicies *out = make([]PromotePolicy, len(*in)) diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index f1e82700a..c1163d60d 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -1977,6 +1977,12 @@ func schema_pkg_apis_kosmos_v1alpha1_KubeInKubeConfig(ref common.ReferenceCallba Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.TenantEntrypoint"), }, }, + "apiServerServiceType": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ 
-3439,7 +3445,6 @@ func schema_pkg_apis_kosmos_v1alpha1_VirtualClusterSpec(ref common.ReferenceCall "kubeInKubeConfig": { SchemaProps: spec.SchemaProps{ Description: "KubeInKubeConfig is the external config of virtual cluster", - Default: map[string]interface{}{}, Ref: ref("github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1.KubeInKubeConfig"), }, }, diff --git a/pkg/kubenest/controller/virtualcluster_init_controller.go b/pkg/kubenest/controller/virtualcluster_init_controller.go index 87986c447..e61361645 100644 --- a/pkg/kubenest/controller/virtualcluster_init_controller.go +++ b/pkg/kubenest/controller/virtualcluster_init_controller.go @@ -4,7 +4,6 @@ import ( "context" "encoding/base64" "fmt" - "github.com/kosmos.io/kosmos/pkg/kubenest/tasks" "sort" "sync" "time" @@ -16,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -38,9 +38,10 @@ import ( "github.com/kosmos.io/kosmos/pkg/kubenest/constants" env "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/env" "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/exector" + "github.com/kosmos.io/kosmos/pkg/kubenest/tasks" "github.com/kosmos.io/kosmos/pkg/kubenest/util" apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" - cs "k8s.io/client-go/kubernetes" + "github.com/kosmos.io/kosmos/pkg/utils" ) type VirtualClusterInitController struct { @@ -73,6 +74,13 @@ const ( RequeueTime = 10 * time.Second ) +var nameMap = map[string]int{ + "agentport": 1, + "serverport": 2, + "healthport": 3, + "adminport": 4, +} + func (c *VirtualClusterInitController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { startTime := time.Now() klog.V(4).InfoS("Started syncing virtual cluster", "virtual cluster", 
request, "startTime", startTime) @@ -298,7 +306,7 @@ func (c *VirtualClusterInitController) createVirtualCluster(virtualCluster *v1al klog.V(2).Infof("Reconciling virtual cluster", "name", virtualCluster.Name) //Assign host port - _, err := c.AllocateHostPort(virtualCluster) + _, err := c.AllocateHostPort(virtualCluster, kubeNestOptions) if err != nil { return errors.Wrap(err, "Error in assign host port!") } @@ -796,28 +804,132 @@ func (c *VirtualClusterInitController) findHostAddresses() ([]string, error) { return ret, nil } -// AllocateHostPort allocate host port for virtual cluster -// #nosec G602 -func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1.VirtualCluster) (int32, error) { - c.lock.Lock() - defer c.lock.Unlock() - if len(virtualCluster.Status.PortMap) > 0 || virtualCluster.Status.Port != 0 { - return 0, nil - } - hostPool, err := GetHostPortPoolFromConfigMap(c.RootClientSet, constants.KosmosNs, constants.HostPortsCMName, constants.HostPortsCMDataName) +func (c *VirtualClusterInitController) GetHostPortNextFunc(virtualCluster *v1alpha1.VirtualCluster) (func() (int32, error), error) { + var hostPool *HostPortPool + var err error + type nextfunc func() (int32, error) + var next nextfunc + hostPool, err = GetHostPortPoolFromConfigMap(c.RootClientSet, constants.KosmosNs, constants.HostPortsCMName, constants.HostPortsCMDataName) if err != nil { klog.Errorf("get host port pool error: %v", err) - return 0, err + return nil, err + } + next = func() nextfunc { + i := 0 + return func() (int32, error) { + if i >= len(hostPool.PortsPool) { + return 0, fmt.Errorf("no available ports") + } + port := hostPool.PortsPool[i] + i++ + return port, nil + } + }() + // } + return next, nil +} + +func createApiAnpAgentSvc(name, namespace string, nameMap map[string]int) *corev1.Service { + apiAnpAgentSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.GetKonnectivityApiServerName(name), + Namespace: namespace, + }, + Spec: 
corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: func() []corev1.ServicePort { + ret := []corev1.ServicePort{} + for k, v := range nameMap { + ret = append(ret, corev1.ServicePort{ + Port: 8080 + int32(v), + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + IntVal: 8080 + int32(v), + }, + Name: k, + }) + } + return ret + }(), + }, + } + return apiAnpAgentSvc +} + +func (c *VirtualClusterInitController) GetNodePorts(client kubernetes.Interface, virtualCluster *v1alpha1.VirtualCluster) ([]int32, error) { + ports := make([]int32, 5) + ipFamilies := utils.IPFamilyGenerator(constants.ApiServerServiceSubnet) + name := virtualCluster.GetName() + namespace := virtualCluster.GetNamespace() + apiSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.GetApiServerName(name), + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Port: 30007, // just for get node port + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + IntVal: 8080, // just for get node port + }, + Name: "client", + }, + }, + IPFamilies: ipFamilies, + }, + } + err := util.CreateOrUpdateService(client, apiSvc) + if err != nil { + return nil, fmt.Errorf("can not create api svc for allocate port, error: %s", err) + } + + createdApiSvc, err := client.CoreV1().Services(namespace).Get(context.TODO(), apiSvc.GetName(), metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("can not get api svc for allocate port, error: %s", err) + } + nodePort := createdApiSvc.Spec.Ports[0].NodePort + ports[0] = nodePort + + apiAnpAgentSvc := createApiAnpAgentSvc(name, namespace, nameMap) + err = util.CreateOrUpdateService(client, apiAnpAgentSvc) + if err != nil { + return nil, fmt.Errorf("can not create anp svc for allocate port, error: %s", err) + } + + createdAnpSvc, err := client.CoreV1().Services(namespace).Get(context.TODO(), apiAnpAgentSvc.GetName(), metav1.GetOptions{}) + if err != 
nil { + return nil, fmt.Errorf("can not get anp svc for allocate port, error: %s", err) + } + + for _, port := range createdAnpSvc.Spec.Ports { + v, ok := nameMap[port.Name] + if ok { + ports[v] = port.NodePort + } else { + return nil, fmt.Errorf("can not get node port for %s", port.Name) + } + } + + return ports, nil +} + +func (c *VirtualClusterInitController) GetHostNetworkPorts(virtualCluster *v1alpha1.VirtualCluster) ([]int32, error) { + next, err := c.GetHostPortNextFunc(virtualCluster) + if err != nil { + return nil, err } hostAddress, err := c.findHostAddresses() if err != nil { - return 0, err + return nil, err } ports := func() []int32 { ports := make([]int32, 0) - for _, p := range hostPool.PortsPool { + for p, err := next(); err == nil; p, err = next() { if !c.isPortAllocated(p, hostAddress) { ports = append(ports, p) if len(ports) > constants.VirtualClusterPortNum { @@ -827,6 +939,32 @@ func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1 } return ports }() + + return ports, nil +} + +// AllocateHostPort allocate host port for virtual cluster +// #nosec G602 +func (c *VirtualClusterInitController) AllocateHostPort(virtualCluster *v1alpha1.VirtualCluster, kubeNestOptions *v1alpha1.KubeNestConfiguration) (int32, error) { + c.lock.Lock() + defer c.lock.Unlock() + if len(virtualCluster.Status.PortMap) > 0 || virtualCluster.Status.Port != 0 { + return 0, nil + } + + var ports []int32 + var err error + + if virtualCluster.Spec.KubeInKubeConfig != nil && virtualCluster.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort { + ports, err = c.GetNodePorts(c.RootClientSet, virtualCluster) + } else { + ports, err = c.GetHostNetworkPorts(virtualCluster) + } + + if err != nil { + return 0, err + } + if len(ports) < constants.VirtualClusterPortNum { klog.Errorf("no available ports to allocate") return 0, fmt.Errorf("no available ports to allocate") @@ -889,7 +1027,7 @@ func (c *VirtualClusterInitController)
AllocateVip(virtualCluster *v1alpha1.Virt return err } -func (c *VirtualClusterInitController) labelNode(client cs.Interface) (reps int, err error) { +func (c *VirtualClusterInitController) labelNode(client kubernetes.Interface) (reps int, err error) { replicas := constants.VipKeepAlivedReplicas nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { diff --git a/pkg/kubenest/controller/virtualcluster_init_controller_test.go b/pkg/kubenest/controller/virtualcluster_init_controller_test.go new file mode 100644 index 000000000..2310bd661 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster_init_controller_test.go @@ -0,0 +1,60 @@ +package controller + +import ( + "fmt" + "testing" +) + +func TestNetxFunc(t *testing.T) { + portsPool := []int32{1, 2, 3, 4, 5} + type nextfunc func() (int32, error) + // var next nextfunc + next := func() nextfunc { + i := 0 + return func() (int32, error) { + if i >= len(portsPool) { + return 0, fmt.Errorf("no available ports") + } + port := portsPool[i] + i++ + return port, nil + } + }() + + for p, err := next(); err == nil; p, err = next() { + fmt.Printf("port: %d\n", p) + } +} + +func TestCreateApiAnpServer(t *testing.T) { + var name, namespace string + apiAnpAgentSvc := createApiAnpAgentSvc(name, namespace, nameMap) + + if len(apiAnpAgentSvc.Spec.Ports) != 4 { + t.Fatalf("apiAnpAgentSvc.Spec.Ports len != 4") + } + if apiAnpAgentSvc.Spec.Ports[0].Name != "agentport" { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[0].Name != agentport") + } + if apiAnpAgentSvc.Spec.Ports[0].Port != 8081 { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[0].Port != 8081") + } + if apiAnpAgentSvc.Spec.Ports[1].Name != "serverport" { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[1].Name != serverport") + } + if apiAnpAgentSvc.Spec.Ports[1].Port != 8082 { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[1].Port != 8082") + } + if apiAnpAgentSvc.Spec.Ports[2].Name != "healthport" { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[2].Name != healthport") + } + 
if apiAnpAgentSvc.Spec.Ports[2].Port != 8083 { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[2].Port != 8083") + } + if apiAnpAgentSvc.Spec.Ports[3].Name != "adminport" { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[3].Name != adminport") + } + if apiAnpAgentSvc.Spec.Ports[3].Port != 8084 { + t.Fatalf("apiAnpAgentSvc.Spec.Ports[3].Port != 8084") + } +} diff --git a/pkg/kubenest/controlplane/apiserver.go b/pkg/kubenest/controlplane/apiserver.go index c9f4e48b5..a1affd1e3 100644 --- a/pkg/kubenest/controlplane/apiserver.go +++ b/pkg/kubenest/controlplane/apiserver.go @@ -14,22 +14,22 @@ import ( "github.com/kosmos.io/kosmos/pkg/kubenest/util" ) -func EnsureVirtualClusterAPIServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration) error { - if err := installAPIServer(client, name, namespace, portMap, kubeNestConfiguration); err != nil { +func EnsureVirtualClusterAPIServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { + if err := installAPIServer(client, name, namespace, portMap, kubeNestConfiguration, vc); err != nil { return fmt.Errorf("failed to install virtual cluster apiserver, err: %w", err) } return nil } func DeleteVirtualClusterAPIServer(client clientset.Interface, name, namespace string) error { - deployName := fmt.Sprintf("%s-%s", name, "apiserver") + deployName := util.GetApiServerName(name) if err := util.DeleteDeployment(client, deployName, namespace); err != nil { return errors.Wrapf(err, "Failed to delete deployment %s/%s", deployName, namespace) } return nil } -func installAPIServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration) error { +func installAPIServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration 
*v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { imageRepository, imageVersion := util.GetImageMessage() clusterIp, err := util.GetEtcdServiceClusterIp(namespace, name+constants.EtcdSuffix, client) if err != nil { @@ -51,21 +51,23 @@ func installAPIServer(client clientset.Interface, name, namespace string, portMa ClusterPort int32 AdmissionPlugins bool IPV6First bool + UseApiServerNodePort bool }{ - DeploymentName: fmt.Sprintf("%s-%s", name, "apiserver"), + DeploymentName: util.GetApiServerName(name), Namespace: namespace, ImageRepository: imageRepository, Version: imageVersion, VirtualControllerLabel: vclabel, EtcdClientService: clusterIp, ServiceSubnet: constants.ApiServerServiceSubnet, - VirtualClusterCertsSecret: fmt.Sprintf("%s-%s", name, "cert"), - EtcdCertsSecret: fmt.Sprintf("%s-%s", name, "etcd-cert"), + VirtualClusterCertsSecret: util.GetCertName(name), + EtcdCertsSecret: util.GetEtcdCertName(name), Replicas: kubeNestConfiguration.KubeInKubeConfig.ApiServerReplicas, EtcdListenClientPort: constants.ApiServerEtcdListenClientPort, ClusterPort: portMap[constants.ApiServerPortKey], IPV6First: IPV6FirstFlag, AdmissionPlugins: kubeNestConfiguration.KubeInKubeConfig.AdmissionPlugins, + UseApiServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort, }) if err != nil { return fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err) diff --git a/pkg/kubenest/controlplane/component.go b/pkg/kubenest/controlplane/component.go index 0efa04702..71c248888 100644 --- a/pkg/kubenest/controlplane/component.go +++ b/pkg/kubenest/controlplane/component.go @@ -137,8 +137,8 @@ func getKubeControllerManagerManifest(name, namespace, clusterCIDR string) (*app ImageRepository: imageRepository, Version: imageVersion, VirtualControllerLabel: vclabel, - VirtualClusterCertsSecret: fmt.Sprintf("%s-%s", name, "cert"), - KubeconfigSecret: fmt.Sprintf("%s-%s", name, 
"admin-config-clusterip"), + VirtualClusterCertsSecret: util.GetCertName(name), + KubeconfigSecret: util.GetAdminConfigClusterIPSecretName(name), ServiceSubnet: constants.ApiServerServiceSubnet, PodSubnet: podSubnet, Replicas: constants.KubeControllerReplicas, @@ -189,7 +189,7 @@ func getVirtualClusterSchedulerManifest(name, namespace string) (*appsv1.Deploym ImageRepository: imageRepository, VirtualControllerLabel: vclabel, Version: imageVersion, - KubeconfigSecret: fmt.Sprintf("%s-%s", name, "admin-config-clusterip"), + KubeconfigSecret: util.GetAdminConfigClusterIPSecretName(name), Replicas: constants.VirtualClusterSchedulerReplicas, }) if err != nil { diff --git a/pkg/kubenest/controlplane/etcd.go b/pkg/kubenest/controlplane/etcd.go index c954d58ed..453365e5e 100644 --- a/pkg/kubenest/controlplane/etcd.go +++ b/pkg/kubenest/controlplane/etcd.go @@ -26,7 +26,7 @@ func EnsureVirtualClusterEtcd(client clientset.Interface, name, namespace string } func DeleteVirtualClusterEtcd(client clientset.Interface, name, namespace string) error { - sts := fmt.Sprintf("%s-%s", name, "etcd") + sts := util.GetEtcdServerName(name) if err := util.DeleteStatefulSet(client, sts, namespace); err != nil { return errors.Wrapf(err, "Failed to delete statefulset %s/%s", sts, namespace) } @@ -46,11 +46,11 @@ func installEtcd(client clientset.Interface, name, namespace string, kubeNestCon initialClusters := make([]string, constants.EtcdReplicas) for index := range initialClusters { - memberName := fmt.Sprintf("%s-%d", fmt.Sprintf("%s-%s", name, "etcd"), index) + memberName := fmt.Sprintf("%s-%d", util.GetEtcdServerName(name), index) // build etcd member cluster peer url memberPeerURL := fmt.Sprintf("http://%s.%s.%s.svc.cluster.local:%v", memberName, - fmt.Sprintf("%s-%s", name, "etcd"), + util.GetEtcdServerName(name), namespace, constants.EtcdListenPeerPort, ) @@ -73,14 +73,14 @@ func installEtcd(client clientset.Interface, name, namespace string, kubeNestCon ETCDStorageClass, 
ETCDStorageSize string IPV6First bool }{ - StatefulSetName: fmt.Sprintf("%s-%s", name, "etcd"), + StatefulSetName: util.GetEtcdServerName(name), Namespace: namespace, ImageRepository: imageRepository, Version: imageVersion, VirtualControllerLabel: vclabel, - EtcdClientService: fmt.Sprintf("%s-%s", name, "etcd-client"), - CertsSecretName: fmt.Sprintf("%s-%s", name, "etcd-cert"), - EtcdPeerServiceName: fmt.Sprintf("%s-%s", name, "etcd"), + EtcdClientService: util.GetEtcdClientServerName(name), + CertsSecretName: util.GetEtcdCertName(name), + EtcdPeerServiceName: util.GetEtcdServerName(name), EtcdDataVolumeName: constants.EtcdDataVolumeName, InitialCluster: strings.Join(initialClusters, ","), EtcdCipherSuites: strings.Join(flag.PreferredTLSCipherNames(), ","), diff --git a/pkg/kubenest/controlplane/service.go b/pkg/kubenest/controlplane/service.go index 4d6ea5f2d..a252f6241 100644 --- a/pkg/kubenest/controlplane/service.go +++ b/pkg/kubenest/controlplane/service.go @@ -12,6 +12,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" "github.com/kosmos.io/kosmos/pkg/kubenest/constants" "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/apiserver" "github.com/kosmos.io/kosmos/pkg/kubenest/manifest/controlplane/coredns/host" @@ -20,8 +21,8 @@ import ( "github.com/kosmos.io/kosmos/pkg/utils" ) -func EnsureVirtualClusterService(client clientset.Interface, name, namespace string, portMap map[string]int32) error { - if err := createServerService(client, name, namespace, portMap); err != nil { +func EnsureVirtualClusterService(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestOpt *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { + if err := createServerService(client, name, namespace, portMap, kubeNestOpt, vc); err != nil { return fmt.Errorf("failed to create virtual cluster apiserver-service, err: %w", err) } return nil @@ -29,10 +30,12 
@@ func EnsureVirtualClusterService(client clientset.Interface, name, namespace str func DeleteVirtualClusterService(client clientset.Interface, name, namespace string) error { services := []string{ - fmt.Sprintf("%s-%s", name, "apiserver"), - fmt.Sprintf("%s-%s", name, "etcd"), - fmt.Sprintf("%s-%s", name, "etcd-client"), + util.GetApiServerName(name), + util.GetEtcdServerName(name), + util.GetEtcdClientServerName(name), "kube-dns", + util.GetKonnectivityServerName(name), + util.GetKonnectivityApiServerName(name), } for _, service := range services { err := client.CoreV1().Services(namespace).Delete(context.TODO(), service, metav1.DeleteOptions{}) @@ -49,18 +52,20 @@ func DeleteVirtualClusterService(client clientset.Interface, name, namespace str return nil } -func createServerService(client clientset.Interface, name, namespace string, portMap map[string]int32) error { +func createServerService(client clientset.Interface, name, namespace string, portMap map[string]int32, _ *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { ipFamilies := utils.IPFamilyGenerator(constants.ApiServerServiceSubnet) apiserverServiceBytes, err := util.ParseTemplate(apiserver.ApiserverService, struct { ServiceName, Namespace, ServiceType string ServicePort int32 IPFamilies []corev1.IPFamily + UseApiServerNodePort bool }{ - ServiceName: fmt.Sprintf("%s-%s", name, "apiserver"), - Namespace: namespace, - ServiceType: constants.ApiServerServiceType, - ServicePort: portMap[constants.ApiServerPortKey], - IPFamilies: ipFamilies, + ServiceName: util.GetApiServerName(name), + Namespace: namespace, + ServiceType: constants.ApiServerServiceType, + ServicePort: portMap[constants.ApiServerPortKey], + IPFamilies: ipFamilies, + UseApiServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort, }) if err != nil { return fmt.Errorf("error when parsing virtualClusterApiserver serive template: %w", err) @@ -69,7 +74,7 @@ func 
createServerService(client clientset.Interface, name, namespace string, por ServiceName, Namespace string ProxyServerPort int32 }{ - ServiceName: fmt.Sprintf("%s-%s", name, "konnectivity-server"), + ServiceName: util.GetKonnectivityServerName(name), Namespace: namespace, ProxyServerPort: portMap[constants.ApiServerNetworkProxyServerPortKey], }) @@ -81,7 +86,7 @@ func createServerService(client clientset.Interface, name, namespace string, por if err := yaml.Unmarshal([]byte(apiserverServiceBytes), apiserverService); err != nil { return fmt.Errorf("error when decoding virtual cluster apiserver service: %w", err) } - if err := createOrUpdateService(client, apiserverService); err != nil { + if err := util.CreateOrUpdateService(client, apiserverService); err != nil { return fmt.Errorf("err when creating virtual cluster apiserver service for %s, err: %w", apiserverService.Name, err) } @@ -89,7 +94,7 @@ func createServerService(client clientset.Interface, name, namespace string, por if err := yaml.Unmarshal([]byte(anpServiceBytes), anpService); err != nil { return fmt.Errorf("error when decoding virtual cluster anp service: %w", err) } - if err := createOrUpdateService(client, anpService); err != nil { + if err := util.CreateOrUpdateService(client, anpService); err != nil { return fmt.Errorf("err when creating virtual cluster anp service for %s, err: %w", anpService.Name, err) } @@ -97,7 +102,7 @@ func createServerService(client clientset.Interface, name, namespace string, por ServiceName, Namespace string EtcdListenClientPort, EtcdListenPeerPort int32 }{ - ServiceName: fmt.Sprintf("%s-%s", name, "etcd"), + ServiceName: util.GetEtcdServerName(name), Namespace: namespace, EtcdListenClientPort: constants.EtcdListenClientPort, EtcdListenPeerPort: constants.EtcdListenPeerPort, @@ -111,7 +116,7 @@ func createServerService(client clientset.Interface, name, namespace string, por return fmt.Errorf("error when decoding Etcd client service: %w", err) } - if err := 
createOrUpdateService(client, etcdPeerService); err != nil { + if err := util.CreateOrUpdateService(client, etcdPeerService); err != nil { return fmt.Errorf("error when creating etcd client service, err: %w", err) } @@ -120,7 +125,7 @@ func createServerService(client clientset.Interface, name, namespace string, por ServiceName, Namespace string EtcdListenClientPort int32 }{ - ServiceName: fmt.Sprintf("%s-%s", name, "etcd-client"), + ServiceName: util.GetEtcdClientServerName(name), Namespace: namespace, EtcdListenClientPort: constants.EtcdListenClientPort, }) @@ -133,7 +138,7 @@ func createServerService(client clientset.Interface, name, namespace string, por return fmt.Errorf("err when decoding Etcd client service: %w", err) } - if err := createOrUpdateService(client, etcdClientService); err != nil { + if err := util.CreateOrUpdateService(client, etcdClientService); err != nil { return fmt.Errorf("err when creating etcd client service, err: %w", err) } @@ -152,31 +157,9 @@ func createServerService(client clientset.Interface, name, namespace string, por return fmt.Errorf("err when decoding core-dns service: %w", err) } - if err := createOrUpdateService(client, coreDnsService); err != nil { + if err := util.CreateOrUpdateService(client, coreDnsService); err != nil { return fmt.Errorf("err when creating core-dns service, err: %w", err) } return nil } - -func createOrUpdateService(client clientset.Interface, service *corev1.Service) error { - _, err := client.CoreV1().Services(service.GetNamespace()).Create(context.TODO(), service, metav1.CreateOptions{}) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "Failed to create service %s/%s", service.GetName(), service.GetNamespace()) - } - - older, err := client.CoreV1().Services(service.GetNamespace()).Get(context.TODO(), service.GetName(), metav1.GetOptions{}) - if err != nil { - return err - } - - service.ResourceVersion = older.ResourceVersion - if _, err := 
client.CoreV1().Services(service.GetNamespace()).Update(context.TODO(), service, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("unable to update Service: %v", err) - } - } - - klog.V(5).InfoS("Successfully created or updated service", "service", service.GetName()) - return nil -} diff --git a/pkg/kubenest/init.go b/pkg/kubenest/init.go index ec9b2acdf..0d4446cd2 100644 --- a/pkg/kubenest/init.go +++ b/pkg/kubenest/init.go @@ -86,7 +86,7 @@ func UninstallPhase(opts *InitOptions) *workflow.Phase { destroyPhase.AppendTask(tasks.UninstallCoreDNSTask()) destroyPhase.AppendTask(tasks.UninstallComponentTask()) destroyPhase.AppendTask(tasks.UninstallVirtualClusterApiserverTask()) - // destroyPhase.AppendTask(tasks.UninstallAnpTask()) + destroyPhase.AppendTask(tasks.UninstallAnpTask()) destroyPhase.AppendTask(tasks.UninstallEtcdTask()) destroyPhase.AppendTask(tasks.UninstallVirtualClusterServiceTask()) destroyPhase.AppendTask(tasks.UninstallCertsAndKubeconfigTask()) diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go index 348070322..152208e32 100644 --- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go +++ b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_deployment.go @@ -21,7 +21,9 @@ spec: virtualCluster-app: apiserver spec: automountServiceAccountToken: false + {{ if not .UseApiServerNodePort }} hostNetwork: true + {{ end }} dnsPolicy: ClusterFirstWithHostNet tolerations: - key: {{ .VirtualControllerLabel }} @@ -50,11 +52,19 @@ spec: image: {{ .ImageRepository }}/kube-apiserver:{{ .Version }} imagePullPolicy: IfNotPresent env: - - name: PODIP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP + {{ if .UseApiServerNodePort }} + - name: HOSTIP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{ else}} + - name: PODIP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + {{ end 
}} command: - kube-apiserver - --allow-privileged=true @@ -93,7 +103,11 @@ spec: - --max-requests-inflight=1500 - --max-mutating-requests-inflight=500 - --v=4 + {{ if .UseApiServerNodePort }} + - --advertise-address=$(HOSTIP) + {{ else }} - --advertise-address=$(PODIP) + {{ end }} {{ if not .AdmissionPlugins }} - --disable-admission-plugins=License {{ end }} @@ -161,7 +175,9 @@ spec: virtualCluster-anp: apiserver-anp spec: automountServiceAccountToken: false + {{ if not .UseApiServerNodePort }} hostNetwork: true + {{ end }} dnsPolicy: ClusterFirstWithHostNet tolerations: - key: {{ .VirtualControllerLabel }} @@ -190,11 +206,19 @@ spec: image: {{ .ImageRepository }}/kube-apiserver:{{ .Version }} imagePullPolicy: IfNotPresent env: - - name: PODIP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP + {{ if .UseApiServerNodePort }} + - name: HOSTIP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{ else}} + - name: PODIP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + {{ end }} command: - kube-apiserver - --allow-privileged=true @@ -233,7 +257,11 @@ spec: - --max-requests-inflight=1500 - --max-mutating-requests-inflight=500 - --v=4 + {{ if .UseApiServerNodePort }} + - --advertise-address=$(HOSTIP) + {{ else }} - --advertise-address=$(PODIP) + {{ end }} - --egress-selector-config-file=/etc/kubernetes/konnectivity-server-config/{{ .Namespace }}/{{ .Name }}/egress_selector_configuration.yaml {{ if not .AdmissionPlugins }} - --disable-admission-plugins=License @@ -372,6 +400,34 @@ spec: configMap: name: kas-proxy-files ` + ApiserverAnpAgentService = ` +apiVersion: v1 +kind: Service +metadata: + name: {{ .SVCName }} + namespace: {{ .Namespace }} +spec: + ports: + - port: {{ .ServerPort }} + name: serverport + targetPort: {{ .ServerPort }} + nodePort: {{ .ServerPort }} + - port: {{ .AgentPort }} + name: agentport + targetPort: {{ .AgentPort }} + nodePort: {{ .AgentPort }} + - port: {{ .HealthPort }} + name: 
healthport + targetPort: {{ .HealthPort }} + nodePort: {{ .HealthPort }} + - port: {{ .AdminPort }} + name: adminport + targetPort: {{ .AdminPort }} + nodePort: {{ .AdminPort }} + selector: + virtualCluster-app: apiserver + type: NodePort + ` AnpAgentManifest = ` apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go index d97138cbf..8bb31367c 100644 --- a/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go +++ b/pkg/kubenest/manifest/controlplane/apiserver/mainfests_service.go @@ -20,6 +20,9 @@ spec: port: {{ .ServicePort }} protocol: TCP targetPort: {{ .ServicePort }} + {{ if .UseApiServerNodePort }} + nodePort: {{ .ServicePort }} + {{ end }} selector: virtualCluster-app: apiserver type: {{ .ServiceType }} diff --git a/pkg/kubenest/tasks/anp.go b/pkg/kubenest/tasks/anp.go index b44eb962c..ad9f2b119 100644 --- a/pkg/kubenest/tasks/anp.go +++ b/pkg/kubenest/tasks/anp.go @@ -8,6 +8,7 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/yaml" @@ -95,7 +96,7 @@ func runAnpServer(r workflow.RunData) error { if err != nil { return fmt.Errorf("failed to create egress_selector_configuration config map, err: %w", err) } - err = installAnpServer(data.RemoteClient(), name, namespace, portMap, kubeNestOpt) + err = installAnpServer(data.RemoteClient(), name, namespace, portMap, kubeNestOpt, data.VirtualCluster()) if err != nil { return fmt.Errorf("failed to install virtual cluster anp component, err: %w", err) } @@ -131,23 +132,18 @@ func uninstallAnp(r workflow.RunData) error { if !ok { return errors.New("Virtual cluster anp task invoked with an invalid data struct") } - name, namespace := 
data.GetName(), data.GetNamespace() client := data.RemoteClient() - portMap := data.HostPortMap() - kubeNestOpt := data.KubeNestOpt() - anpManifest, vcClient, err := getAnpAgentManifest(client, name, namespace, portMap, kubeNestOpt) + namespace := data.GetNamespace() + name := "kas-proxy-files" + err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { - return fmt.Errorf("failed to uninstall anp agent when get anp manifest, err: %w", err) - } - actionFunc := func(ctx context.Context, c dynamic.Interface, u *unstructured.Unstructured) error { - // create the object - return util.DeleteObject(vcClient, u.GetNamespace(), u.GetName(), u) + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "Failed to delete service %s/%s", name, namespace) + } } - - klog.V(2).InfoS("[VirtualClusterAnp] Successfully uninstalled virtual cluster anp component", "virtual cluster", klog.KObj(data)) - return util.ForEachObjectInYAML(context.TODO(), vcClient, []byte(anpManifest), "", actionFunc) + return nil } -func installAnpServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration) error { +func installAnpServer(client clientset.Interface, name, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) error { imageRepository, imageVersion := util.GetImageMessage() clusterIp, err := util.GetEtcdServiceClusterIp(namespace, name+constants.EtcdSuffix, client) if err != nil { @@ -176,16 +172,17 @@ func installAnpServer(client clientset.Interface, name, namespace string, portMa AnpMode string AdmissionPlugins bool IPV6First bool + UseApiServerNodePort bool }{ - DeploymentName: fmt.Sprintf("%s-%s", name, "apiserver"), + DeploymentName: util.GetApiServerName(name), Namespace: namespace, ImageRepository: imageRepository, Version: imageVersion, VirtualControllerLabel: vclabel, 
EtcdClientService: clusterIp, ServiceSubnet: constants.ApiServerServiceSubnet, - VirtualClusterCertsSecret: fmt.Sprintf("%s-%s", name, "cert"), - EtcdCertsSecret: fmt.Sprintf("%s-%s", name, "etcd-cert"), + VirtualClusterCertsSecret: util.GetCertName(name), + EtcdCertsSecret: util.GetEtcdCertName(name), Replicas: kubeNestConfiguration.KubeInKubeConfig.ApiServerReplicas, EtcdListenClientPort: constants.ApiServerEtcdListenClientPort, ClusterPort: portMap[constants.ApiServerPortKey], @@ -193,11 +190,12 @@ func installAnpServer(client clientset.Interface, name, namespace string, portMa ServerPort: portMap[constants.ApiServerNetworkProxyServerPortKey], HealthPort: portMap[constants.ApiServerNetworkProxyHealthPortKey], AdminPort: portMap[constants.ApiServerNetworkProxyAdminPortKey], - KubeconfigSecret: fmt.Sprintf("%s-%s", name, "admin-config-clusterip"), + KubeconfigSecret: util.GetAdminConfigClusterIPSecretName(name), Name: name, AnpMode: kubeNestConfiguration.KubeInKubeConfig.AnpMode, AdmissionPlugins: kubeNestConfiguration.KubeInKubeConfig.AdmissionPlugins, IPV6First: IPV6FirstFlag, + UseApiServerNodePort: vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort, }) if err != nil { return fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err) @@ -212,6 +210,38 @@ func installAnpServer(client clientset.Interface, name, namespace string, portMa if err := util.CreateOrUpdateDeployment(client, apiserverDeployment); err != nil { return fmt.Errorf("error when creating deployment for %s, err: %w", apiserverDeployment.Name, err) } + + if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort { + apiserverServiceBytes, err := util.ParseTemplate(apiserver.ApiserverAnpAgentService, struct { + SVCName, Namespace string + ClusterPort int32 + AgentPort int32 + ServerPort int32 + HealthPort int32 + AdminPort int32 + }{ + SVCName: 
util.GetKonnectivityApiServerName(name), + Namespace: namespace, + ClusterPort: portMap[constants.ApiServerPortKey], + AgentPort: portMap[constants.ApiServerNetworkProxyAgentPortKey], + ServerPort: portMap[constants.ApiServerNetworkProxyServerPortKey], + HealthPort: portMap[constants.ApiServerNetworkProxyHealthPortKey], + AdminPort: portMap[constants.ApiServerNetworkProxyAdminPortKey], + }) + if err != nil { + return fmt.Errorf("error when parsing virtual cluster apiserver svc template: %w", err) + } + klog.V(4).InfoS("[anp] apply anp server svc", "anp sever svc deploy", apiserverServiceBytes) + + apiserverSvc := &v1.Service{} + if err := yaml.Unmarshal([]byte(apiserverServiceBytes), apiserverSvc); err != nil { + return fmt.Errorf("error when decoding virtual cluster apiserver svc: %w", err) + } + + if err := util.CreateOrUpdateService(client, apiserverSvc); err != nil { + return fmt.Errorf("error when creating svc for %s, err: %w", apiserverSvc.Name, err) + } + } return nil } @@ -221,7 +251,7 @@ func installAnpAgent(data InitData) error { namespace := data.GetNamespace() portMap := data.HostPortMap() kubeNestOpt := data.KubeNestOpt() - anpAgentManifestBytes, vcClient, err2 := getAnpAgentManifest(client, name, namespace, portMap, kubeNestOpt) + anpAgentManifestBytes, vcClient, err2 := getAnpAgentManifest(client, name, namespace, portMap, kubeNestOpt, data.VirtualCluster()) if err2 != nil { return err2 } @@ -234,10 +264,17 @@ func installAnpAgent(data InitData) error { return util.ForEachObjectInYAML(context.TODO(), vcClient, []byte(anpAgentManifestBytes), "", actionFunc) } -func getAnpAgentManifest(client clientset.Interface, name string, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration) (string, dynamic.Interface, error) { +func getAnpAgentManifest(client clientset.Interface, name string, namespace string, portMap map[string]int32, kubeNestConfiguration *v1alpha1.KubeNestConfiguration, vc *v1alpha1.VirtualCluster) 
(string, dynamic.Interface, error) { imageRepository, imageVersion := util.GetImageMessage() // get apiServer hostIp - proxyServerHost, err := getDeploymentPodIPs(client, namespace, fmt.Sprintf("%s-%s", name, "apiserver")) + var proxyServerHost []string + var err error + if vc.Spec.KubeInKubeConfig != nil && vc.Spec.KubeInKubeConfig.ApiServerServiceType == v1alpha1.NodePort { + proxyServerHost, err = getDeploymentHostIPs(client, namespace, util.GetApiServerName(name)) + } else { + proxyServerHost, err = getDeploymentPodIPs(client, namespace, util.GetApiServerName(name)) + } + if err != nil { klog.Warningf("Failed to get apiserver hostIp, err: %v", err) // ignore if can't get the hostIp when uninstall the deployment @@ -257,7 +294,7 @@ func getAnpAgentManifest(client clientset.Interface, name string, namespace stri AgentPort: portMap[constants.ApiServerNetworkProxyAgentPortKey], ProxyServerHost: proxyServerHost, AnpMode: kubeNestConfiguration.KubeInKubeConfig.AnpMode, - AgentCertName: fmt.Sprintf("%s-%s", name, "cert"), + AgentCertName: util.GetCertName(name), }) if err != nil { return "", nil, fmt.Errorf("error when parsing virtual cluster apiserver deployment template: %w", err) @@ -295,9 +332,33 @@ func getDeploymentPodIPs(clientset clientset.Interface, namespace, deploymentNam return podIPs, nil } +func getDeploymentHostIPs(clientset clientset.Interface, namespace, deploymentName string) ([]string, error) { + deployment, err := clientset.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("error getting deployment: %v", err) + } + + labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector) + listOptions := metav1.ListOptions{LabelSelector: labelSelector} + + pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), listOptions) + if err != nil { + return nil, fmt.Errorf("error listing pods: %v", err) + } + + var podIPs []string + for _, pod := range pods.Items 
{ + if pod.Status.Phase == v1.PodRunning { + podIPs = append(podIPs, pod.Status.HostIP) + } + } + + return podIPs, nil +} + func getVcDynamicClient(client clientset.Interface, name, namespace string) (dynamic.Interface, error) { secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), - fmt.Sprintf("%s-%s", name, constants.AdminConfig), metav1.GetOptions{}) + util.GetAdminConfigSecretName(name), metav1.GetOptions{}) if err != nil { return nil, errors.Wrap(err, "Get virtualcluster kubeconfig secret error") } @@ -313,7 +374,7 @@ func getVcDynamicClient(client clientset.Interface, name, namespace string) (dyn } func GetVcClientset(client clientset.Interface, name, namespace string) (clientset.Interface, error) { secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), - fmt.Sprintf("%s-%s", name, constants.AdminConfig), metav1.GetOptions{}) + util.GetAdminConfigSecretName(name), metav1.GetOptions{}) if err != nil { return nil, errors.Wrap(err, "Get virtualcluster kubeconfig secret error") } @@ -352,7 +413,7 @@ func runUploadProxyAgentCert(r workflow.RunData) error { } err = createOrUpdateSecret(vcClient, &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", data.GetName(), "cert"), + Name: util.GetCertName(data.GetName()), Namespace: "kube-system", Labels: VirtualClusterControllerLabel, }, diff --git a/pkg/kubenest/tasks/apiserver.go b/pkg/kubenest/tasks/apiserver.go index 68be48d74..254163f05 100644 --- a/pkg/kubenest/tasks/apiserver.go +++ b/pkg/kubenest/tasks/apiserver.go @@ -52,6 +52,7 @@ func runVirtualClusterAPIServer(r workflow.RunData) error { data.GetNamespace(), data.HostPortMap(), data.KubeNestOpt(), + data.VirtualCluster(), ) if err != nil { return fmt.Errorf("failed to install virtual cluster apiserver component, err: %w", err) diff --git a/pkg/kubenest/tasks/cert.go b/pkg/kubenest/tasks/cert.go index f674c2fc1..e8deaa495 100644 --- a/pkg/kubenest/tasks/cert.go +++ b/pkg/kubenest/tasks/cert.go @@ -8,6 +8,7 @@ 
import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" + "github.com/kosmos.io/kosmos/pkg/kubenest/util" "github.com/kosmos.io/kosmos/pkg/kubenest/util/cert" "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" ) @@ -37,7 +38,7 @@ func skipCerts(d workflow.RunData) (bool, error) { return false, errors.New("certs task invoked with an invalid data struct") } - secretName := fmt.Sprintf("%s-%s", data.GetName(), "cert") + secretName := util.GetCertName(data.GetName()) secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return false, nil diff --git a/pkg/kubenest/tasks/coredns.go b/pkg/kubenest/tasks/coredns.go index 526fa121a..936bff053 100644 --- a/pkg/kubenest/tasks/coredns.go +++ b/pkg/kubenest/tasks/coredns.go @@ -192,7 +192,7 @@ func runCoreDnsVirtualTask(r workflow.RunData) error { } secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - fmt.Sprintf("%s-%s", data.GetName(), constants.AdminConfig), metav1.GetOptions{}) + util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) if err != nil { return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") } diff --git a/pkg/kubenest/tasks/endpoint.go b/pkg/kubenest/tasks/endpoint.go index 4224b0c68..7e5474b51 100644 --- a/pkg/kubenest/tasks/endpoint.go +++ b/pkg/kubenest/tasks/endpoint.go @@ -2,7 +2,6 @@ package tasks import ( "context" - "fmt" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -12,6 +11,7 @@ import ( "github.com/kosmos.io/kosmos/pkg/kubenest/constants" "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" + "github.com/kosmos.io/kosmos/pkg/kubenest/util" "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" ) @@ -46,7 +46,7 @@ func runEndPointInVirtualClusterTask(r workflow.RunData) error { } secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - fmt.Sprintf("%s-%s", 
data.GetName(), constants.AdminConfig), metav1.GetOptions{}) + util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) if err != nil { return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") } diff --git a/pkg/kubenest/tasks/manifests_components.go b/pkg/kubenest/tasks/manifests_components.go index 1cdcd70ac..3af0ffda2 100644 --- a/pkg/kubenest/tasks/manifests_components.go +++ b/pkg/kubenest/tasks/manifests_components.go @@ -2,7 +2,6 @@ package tasks import ( "context" - "fmt" "os" "path/filepath" @@ -61,7 +60,7 @@ func applyComponentsManifests(r workflow.RunData) error { } keepalivedReplicas := constants.VipKeepAlivedReplicas secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - fmt.Sprintf("%s-%s", data.GetName(), constants.AdminConfig), metav1.GetOptions{}) + util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) if err != nil { return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") } diff --git a/pkg/kubenest/tasks/proxy.go b/pkg/kubenest/tasks/proxy.go index 50dd9e1a4..089bebeeb 100644 --- a/pkg/kubenest/tasks/proxy.go +++ b/pkg/kubenest/tasks/proxy.go @@ -13,6 +13,7 @@ import ( "github.com/kosmos.io/kosmos/pkg/kubenest/constants" "github.com/kosmos.io/kosmos/pkg/kubenest/controlplane" + "github.com/kosmos.io/kosmos/pkg/kubenest/util" apiclient "github.com/kosmos.io/kosmos/pkg/kubenest/util/api-client" "github.com/kosmos.io/kosmos/pkg/kubenest/workflow" ) @@ -59,7 +60,7 @@ func runVirtualClusterProxy(r workflow.RunData) error { // Get the kubeconfig of virtual cluster and put it into the cm of kube-proxy secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - fmt.Sprintf("%s-%s", data.GetName(), constants.AdminConfig), metav1.GetOptions{}) + util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) if err != nil { return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") } @@ -126,7 +127,7 @@ func 
uninstallVirtualClusterProxy(r workflow.RunData) error { } secret, err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Get(context.TODO(), - fmt.Sprintf("%s-%s", data.GetName(), constants.AdminConfig), metav1.GetOptions{}) + util.GetAdminConfigSecretName(data.GetName()), metav1.GetOptions{}) if err != nil { return errors.Wrap(err, "Get virtualcluster kubeconfig secret error") } diff --git a/pkg/kubenest/tasks/service.go b/pkg/kubenest/tasks/service.go index 84e4116d4..30eaeefaa 100644 --- a/pkg/kubenest/tasks/service.go +++ b/pkg/kubenest/tasks/service.go @@ -45,6 +45,8 @@ func runVirtualClusterService(r workflow.RunData) error { data.GetName(), data.GetNamespace(), data.HostPortMap(), + data.KubeNestOpt(), + data.VirtualCluster(), ) if err != nil { return fmt.Errorf("failed to install virtual cluster service , err: %w", err) diff --git a/pkg/kubenest/tasks/upload.go b/pkg/kubenest/tasks/upload.go index aa8c33a97..09171e44f 100644 --- a/pkg/kubenest/tasks/upload.go +++ b/pkg/kubenest/tasks/upload.go @@ -100,7 +100,7 @@ func runUploadVirtualClusterCert(r workflow.RunData) error { err := createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", data.GetName(), "cert"), + Name: util.GetCertName(data.GetName()), Namespace: data.GetNamespace(), Labels: VirtualClusterControllerLabel, }, @@ -127,7 +127,7 @@ func runUploadEtcdCert(r workflow.RunData) error { err := createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: data.GetNamespace(), - Name: fmt.Sprintf("%s-%s", data.GetName(), "etcd-cert"), + Name: util.GetEtcdCertName(data.GetName()), Labels: VirtualClusterControllerLabel, }, @@ -172,7 +172,7 @@ func runUploadAdminKubeconfig(r workflow.RunData) error { } var controlplaneIpEndpoint, clusterIPEndpoint string - service, err := data.RemoteClient().CoreV1().Services(data.GetNamespace()).Get(context.TODO(), fmt.Sprintf("%s-%s", data.GetName(), 
"apiserver"), metav1.GetOptions{}) + service, err := data.RemoteClient().CoreV1().Services(data.GetNamespace()).Get(context.TODO(), util.GetApiServerName(data.GetName()), metav1.GetOptions{}) if err != nil { return err } @@ -204,7 +204,7 @@ func runUploadAdminKubeconfig(r workflow.RunData) error { err = createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: data.GetNamespace(), - Name: fmt.Sprintf("%s-%s", data.GetName(), "admin-config"), + Name: util.GetAdminConfigSecretName(data.GetName()), Labels: VirtualClusterControllerLabel, }, Data: map[string][]byte{"kubeconfig": controlplaneIpConfigBytes}, @@ -216,7 +216,7 @@ func runUploadAdminKubeconfig(r workflow.RunData) error { err = createOrUpdateSecret(data.RemoteClient(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: data.GetNamespace(), - Name: fmt.Sprintf("%s-%s", data.GetName(), "admin-config-clusterip"), + Name: util.GetAdminConfigClusterIPSecretName(data.GetName()), Labels: VirtualClusterControllerLabel, }, Data: map[string][]byte{"kubeconfig": clusterIPConfigBytes}, @@ -301,10 +301,10 @@ func deleteSecrets(r workflow.RunData) error { } secrets := []string{ - fmt.Sprintf("%s-%s", data.GetName(), "cert"), - fmt.Sprintf("%s-%s", data.GetName(), "etcd-cert"), - fmt.Sprintf("%s-%s", data.GetName(), "admin-config"), - fmt.Sprintf("%s-%s", data.GetName(), "admin-config-clusterip"), + util.GetCertName(data.GetName()), + util.GetEtcdCertName(data.GetName()), + util.GetAdminConfigSecretName(data.GetName()), + util.GetAdminConfigClusterIPSecretName(data.GetName()), } for _, secret := range secrets { err := data.RemoteClient().CoreV1().Secrets(data.GetNamespace()).Delete(context.TODO(), secret, metav1.DeleteOptions{}) diff --git a/pkg/kubenest/util/cert/certs.go b/pkg/kubenest/util/cert/certs.go index 5718662f4..fe90409e6 100644 --- a/pkg/kubenest/util/cert/certs.go +++ b/pkg/kubenest/util/cert/certs.go @@ -143,8 +143,8 @@ func VirtualClusterFrontProxyClient() 
*CertConfig { } func etcdServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, error) { - etcdClientServiceDNS := fmt.Sprintf("%s.%s.svc.cluster.local", fmt.Sprintf("%s-%s", cfg.Name, "etcd-client"), cfg.Namespace) - etcdPeerServiceDNS := fmt.Sprintf("*.%s.%s.svc.cluster.local", fmt.Sprintf("%s-%s", cfg.Name, "etcd"), cfg.Namespace) + etcdClientServiceDNS := fmt.Sprintf("%s.%s.svc.cluster.local", util.GetEtcdClientServerName(cfg.Name), cfg.Namespace) + etcdPeerServiceDNS := fmt.Sprintf("*.%s.%s.svc.cluster.local", util.GetEtcdServerName(cfg.Name), cfg.Namespace) altNames := &certutil.AltNames{ DNSNames: []string{"localhost", etcdClientServiceDNS, etcdPeerServiceDNS}, diff --git a/pkg/kubenest/util/helper.go b/pkg/kubenest/util/helper.go index 52a2a7a00..26d91fa6c 100644 --- a/pkg/kubenest/util/helper.go +++ b/pkg/kubenest/util/helper.go @@ -24,6 +24,23 @@ import ( "sigs.k8s.io/yaml" ) +func CreateOrUpdateService(client clientset.Interface, svc *v1.Service) error { + _, err := client.CoreV1().Services(svc.GetNamespace()).Update(context.TODO(), svc, metav1.UpdateOptions{}) + if err != nil { + if !apierrors.IsNotFound(err) { + return err + } + + _, err := client.CoreV1().Services(svc.GetNamespace()).Create(context.TODO(), svc, metav1.CreateOptions{}) + if err != nil { + return err + } + } + + klog.V(5).InfoS("Successfully created or updated svc", "svc", svc.GetName()) + return nil +} + func CreateOrUpdateDeployment(client clientset.Interface, deployment *appsv1.Deployment) error { _, err := client.AppsV1().Deployments(deployment.GetNamespace()).Create(context.TODO(), deployment, metav1.CreateOptions{}) if err != nil { diff --git a/pkg/kubenest/util/helper_test.go b/pkg/kubenest/util/helper_test.go new file mode 100644 index 000000000..c533e7908 --- /dev/null +++ b/pkg/kubenest/util/helper_test.go @@ -0,0 +1,233 @@ +package util + +import ( + "context" + "fmt" + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// createKubeConfig creates a kubeConfig from the given config and masterOverride. +func createKubeConfig() (*restclient.Config, error) { + kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: "../../../ignore_dir/local.conf"}, + &clientcmd.ConfigOverrides{}).ClientConfig() + if err != nil { + return nil, err + } + + kubeConfig.DisableCompression = true + kubeConfig.QPS = 40.0 + kubeConfig.Burst = 60 + + return kubeConfig, nil +} + +func prepare() (clientset.Interface, error) { + // Prepare kube config. + kubeConfig, err := createKubeConfig() + if err != nil { + return nil, err + } + + hostKubeClient, err := kubernetes.NewForConfig(kubeConfig) + if err != nil { + return nil, fmt.Errorf("could not create clientset: %v", err) + } + + return hostKubeClient, nil +} + +func TestCreateOrUpdate(t *testing.T) { + client, err := prepare() + if err != nil { + t.Fatalf("failed to prepare client: %v", err) + } + + tests := []struct { + name string + input *v1.Service + want bool + }{ + { + name: "basic", + input: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-nodeport-service", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Selector: map[string]string{ + "app": "my-app", + }, + Ports: []v1.ServicePort{ + { + Port: 30007, // 服务的端口 + Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + IntVal: 8080, // Pod 中的目标端口 + }, + NodePort: 30007, // 固定的 NodePort 端口 + }, + }, + }, + }, + want: true, + }, + { + name: "same port", + input: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-nodeport-service", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Selector: map[string]string{ + "app": "my-app", + }, + Ports: 
[]v1.ServicePort{ + { + Port: 30007, // 服务的端口 + Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + IntVal: 8080, // Pod 中的目标端口 + }, + NodePort: 30007, // 固定的 NodePort 端口 + }, + }, + }, + }, + want: true, + }, + { + name: "different port", + input: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-nodeport-service", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Selector: map[string]string{ + "app": "my-app", + }, + Ports: []v1.ServicePort{ + { + Port: 30077, // 服务的端口 + Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + IntVal: 8080, // Pod 中的目标端口 + }, + NodePort: 30077, // 固定的 NodePort 端口 + }, + }, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := CreateOrUpdateService(client, tt.input) + if err != nil { + t.Fatalf("CreateOrUpdateService() error = %v", err) + } + }) + } +} + +func TestCreateSvc(t *testing.T) { + client, err := prepare() + if err != nil { + t.Fatalf("failed to prepare client: %v", err) + } + + tests := []struct { + name string + input *v1.Service + update *v1.Service + want bool + }{ + { + name: "ipv4 only", + input: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-nodeport-service", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Selector: map[string]string{ + "app": "my-app", + }, + Ports: []v1.ServicePort{ + { + Port: 30007, // 服务的端口 + Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + IntVal: 8080, // Pod 中的目标端口 + }, + // NodePort: 30007, // 固定的 NodePort 端口 + }, + }, + }, + }, + update: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-nodeport-service", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Selector: map[string]string{ + "app": "my-app", + }, + Ports: []v1.ServicePort{ + { + Port: 30007, // 服务的端口 + Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{ + IntVal: 8080, // Pod 中的目标端口 + }, + // 
NodePort: 30007, // 固定的 NodePort 端口 + }, + }, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := CreateOrUpdateService(client, tt.input) + if err != nil { + t.Fatalf("CreateOrUpdateService() error = %v", err) + } + svc, err := client.CoreV1().Services(tt.input.Namespace).Get(context.TODO(), tt.input.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("CreateOrUpdateService() error = %v", err) + } + nodePort := svc.Spec.Ports[0].NodePort + tt.update.Spec.Ports[0].NodePort = nodePort + tt.update.Spec.Ports[0].Port = nodePort + tt.update.Spec.Ports[0].TargetPort = intstr.IntOrString{ + IntVal: nodePort, + } + err = CreateOrUpdateService(client, tt.update) + if err != nil { + t.Fatalf("CreateOrUpdateService() error = %v", err) + } + }) + } +} diff --git a/pkg/kubenest/util/name.go b/pkg/kubenest/util/name.go new file mode 100644 index 000000000..86cdb7e6a --- /dev/null +++ b/pkg/kubenest/util/name.go @@ -0,0 +1,43 @@ +package util + +import ( + "fmt" + + "github.com/kosmos.io/kosmos/pkg/kubenest/constants" +) + +func GetApiServerName(name string) string { + return fmt.Sprintf("%s-%s", name, "apiserver") +} + +func GetEtcdClientServerName(name string) string { + return fmt.Sprintf("%s-%s", name, "etcd-client") +} + +func GetKonnectivityServerName(name string) string { + return fmt.Sprintf("%s-%s", name, "konnectivity-server") +} + +func GetKonnectivityApiServerName(name string) string { + return fmt.Sprintf("%s-%s-konnectivity", name, "apiserver") +} + +func GetEtcdServerName(name string) string { + return fmt.Sprintf("%s-%s", name, "etcd") +} + +func GetCertName(name string) string { + return fmt.Sprintf("%s-%s", name, "cert") +} + +func GetEtcdCertName(name string) string { + return fmt.Sprintf("%s-%s", name, "etcd-cert") +} + +func GetAdminConfigSecretName(name string) string { + return fmt.Sprintf("%s-%s", name, constants.AdminConfig) +} + +func GetAdminConfigClusterIPSecretName(name string) string { + 
return fmt.Sprintf("%s-%s", name, "admin-config-clusterip") +}