From 79e18b6904a6a4d157133133373fd8f6d8924c8c Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Mon, 5 Jun 2023 14:13:03 +0800 Subject: [PATCH] Build common options for agent Signed-off-by: Jian Qiu --- .github/workflows/e2e.yml | 12 +- Makefile | 2 +- ...nager-registration-webhook-deployment.yaml | 1 - .../klusterlet-registration-deployment.yaml | 2 +- pkg/cmd/hub/operator.go | 2 +- pkg/cmd/spoke/operator.go | 2 +- pkg/common/options/options.go | 64 ++++++ .../certrotation/cabundle.go | 0 .../certrotation/cabundle_test.go | 0 .../certrotation/signer.go | 0 .../certrotation/signer_test.go | 0 .../certrotation/target.go | 0 .../certrotation/target_test.go | 0 .../helpers/error.go | 0 .../helpers/helpers.go | 0 .../helpers/helpers_test.go | 0 .../helpers/queuekey.go | 0 .../helpers/queuekey_test.go | 0 .../helpers/sa_syncer.go | 0 .../helpers/sa_syncer_test.go | 0 .../helpers/testing/assertion.go | 2 +- .../certrotation_controller.go | 4 +- .../certrotation_controller_test.go | 2 +- .../clustermanager_controller.go | 2 +- .../clustermanager_controller_test.go | 2 +- .../clustermanager_crd_reconcile.go | 4 +- .../clustermanager_hub_reconcile.go | 2 +- .../clustermanager_runtime_reconcile.go | 2 +- .../clustermanager_webhook_reconcile.go | 2 +- .../crd_status_controller.go | 4 +- .../crd_status_controller_test.go | 2 +- .../migration_controller.go | 2 +- .../migration_controller_test.go | 0 .../clustermanager_status_controller.go | 2 +- .../clustermanager_status_controller_test.go | 2 +- .../operators/clustermanager/options.go | 12 +- .../operators/crdmanager/manager.go | 0 .../operators/crdmanager/manager_test.go | 0 .../addonsecretcontroller/controller.go | 2 +- .../addonsecretcontroller/controller_test.go | 0 .../bootstrapcontroller.go | 2 +- .../bootstrapcontroller_test.go | 0 .../klusterletcontroller/client_builder.go | 0 .../klusterlet_cleanup_controller.go | 2 +- .../klusterlet_cleanup_controller_test.go | 2 +- .../klusterlet_controller.go | 2 +- .../klusterlet_controller_test.go | 4 +- .../klusterlet_crd_reconcile.go | 4 +- .../klusterlet_managed_reconcile.go | 2 +- .../klusterlet_management_recocile.go | 2 +- .../klusterlet_runtime_reconcile.go | 2 +- .../klusterlet_ssar_controller.go | 2 +- .../klusterlet_ssar_controller_test.go | 4 +- .../klusterlet_status_controller.go | 2 +- .../klusterlet_status_controller_test.go | 2 +- .../operators/klusterlet/options.go | 10 +- pkg/registration/spoke/spokeagent.go | 82 +++----- pkg/registration/spoke/spokeagent_test.go | 76 ++++--- pkg/work/spoke/spokeagent.go | 80 +++----- test/e2e/common.go | 2 +- .../operator/clustermanager_hosted_test.go | 2 +- .../operator/clustermanager_test.go | 6 +- test/integration/operator/doc.go | 2 +- .../operator/integration_suite_test.go | 6 +- .../operator/klusterlet_hosted_test.go | 2 +- test/integration/operator/klusterlet_test.go | 4 +- .../registration/addon_lease_test.go | 5 +- .../registration/addon_registration_test.go | 5 +- .../registration/certificate_rotation_test.go | 5 +- .../registration/disaster_recovery_test.go | 4 +- .../registration/managedcluster_lease_test.go | 16 +- .../registration/spokeagent_recovery_test.go | 7 +- .../registration/spokeagent_restart_test.go | 14 +- .../spokecluster_autoapproval_test.go | 4 +- .../registration/spokecluster_claim_test.go | 4 +- .../registration/spokecluster_joining_test.go | 4 +- .../registration/spokecluster_status_test.go | 4 +- .../registration/taint_add_test.go | 4 +- test/integration/work/deleteoption_test.go | 96 ++++----- 
test/integration/work/executor_test.go | 188 +++++++++--------- test/integration/work/statusfeedback_test.go | 68 ++++--- .../work/unmanaged_appliedwork_test.go | 28 +-- test/integration/work/updatestrategy_test.go | 86 ++++---- test/integration/work/work_test.go | 94 ++++----- 84 files changed, 576 insertions(+), 497 deletions(-) create mode 100644 pkg/common/options/options.go rename pkg/{registration-operator => operator}/certrotation/cabundle.go (100%) rename pkg/{registration-operator => operator}/certrotation/cabundle_test.go (100%) rename pkg/{registration-operator => operator}/certrotation/signer.go (100%) rename pkg/{registration-operator => operator}/certrotation/signer_test.go (100%) rename pkg/{registration-operator => operator}/certrotation/target.go (100%) rename pkg/{registration-operator => operator}/certrotation/target_test.go (100%) rename pkg/{registration-operator => operator}/helpers/error.go (100%) rename pkg/{registration-operator => operator}/helpers/helpers.go (100%) rename pkg/{registration-operator => operator}/helpers/helpers_test.go (100%) rename pkg/{registration-operator => operator}/helpers/queuekey.go (100%) rename pkg/{registration-operator => operator}/helpers/queuekey_test.go (100%) rename pkg/{registration-operator => operator}/helpers/sa_syncer.go (100%) rename pkg/{registration-operator => operator}/helpers/sa_syncer_test.go (100%) rename pkg/{registration-operator => operator}/helpers/testing/assertion.go (97%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go (98%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller_test.go (99%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go (99%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go (99%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go (97%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go (98%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go (99%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go (98%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go (96%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go (96%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/migrationcontroller/migration_controller.go (99%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go (100%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go (98%) rename pkg/{registration-operator => operator}/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go (98%) rename pkg/{registration-operator => 
operator}/operators/clustermanager/options.go (87%) rename pkg/{registration-operator => operator}/operators/crdmanager/manager.go (100%) rename pkg/{registration-operator => operator}/operators/crdmanager/manager_test.go (100%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/addonsecretcontroller/controller.go (97%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go (100%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go (100%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/client_builder.go (100%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go (97%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go (98%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go (99%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go (98%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go (98%) rename pkg/{registration-operator => operator}/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go (98%) rename pkg/{registration-operator => operator}/operators/klusterlet/options.go (88%) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index d771c4794..6103dca03 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -33,15 +33,15 @@ jobs: - name: install imagebuilder run: go install github.com/openshift/imagebuilder/cmd/imagebuilder@v1.2.3 - name: Build images - run: make images + run: IMAGE_TAG=e2e make images - name: Load images run: | - kind load docker-image --name=kind quay.io/open-cluster-management/registration-operator:latest - kind load docker-image --name=kind quay.io/open-cluster-management/registration:latest - kind load docker-image --name=kind quay.io/open-cluster-management/work:latest - kind load docker-image --name=kind quay.io/open-cluster-management/placement:latest + kind load docker-image --name=kind quay.io/open-cluster-management/registration-operator:e2e + kind load docker-image --name=kind quay.io/open-cluster-management/registration:e2e + kind load 
docker-image --name=kind quay.io/open-cluster-management/work:e2e + kind load docker-image --name=kind quay.io/open-cluster-management/placement:e2e - name: Test E2E run: | - make test-e2e + IMAGE_TAG=e2e make test-e2e env: KUBECONFIG: /home/runner/.kube/config diff --git a/Makefile b/Makefile index a918f1b59..b7e8faa5a 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ REGISTRATION_IMAGE ?= $(IMAGE_REGISTRY)/registration:$(IMAGE_TAG) # PLACEMENT_IMAGE can be set in the env to override calculated value PLACEMENT_IMAGE ?= $(IMAGE_REGISTRY)/placement:$(IMAGE_TAG) # ADDON_MANAGER_IMAGE can be set in the env to override calculated value -ADDON_MANAGER_IMAGE ?= $(IMAGE_REGISTRY)/addon-manager:$(IMAGE_TAG) +ADDON_MANAGER_IMAGE ?= $(IMAGE_REGISTRY)/addon-manager:latest $(call build-image,registration,$(REGISTRATION_IMAGE),./build/Dockerfile.registration,.) $(call build-image,work,$(WORK_IMAGE),./build/Dockerfile.work,.) diff --git a/manifests/cluster-manager/management/cluster-manager-registration-webhook-deployment.yaml b/manifests/cluster-manager/management/cluster-manager-registration-webhook-deployment.yaml index 900b28e2f..1d289320c 100644 --- a/manifests/cluster-manager/management/cluster-manager-registration-webhook-deployment.yaml +++ b/manifests/cluster-manager/management/cluster-manager-registration-webhook-deployment.yaml @@ -56,7 +56,6 @@ spec: {{ if .HostedMode }} - "--kubeconfig=/var/run/secrets/hub/kubeconfig" {{ end }} - imagePullPolicy: Always resources: requests: cpu: 2m diff --git a/manifests/klusterlet/management/klusterlet-registration-deployment.yaml b/manifests/klusterlet/management/klusterlet-registration-deployment.yaml index 30bba6e99..6ec9e01ae 100644 --- a/manifests/klusterlet/management/klusterlet-registration-deployment.yaml +++ b/manifests/klusterlet/management/klusterlet-registration-deployment.yaml @@ -51,7 +51,7 @@ spec: args: - "/registration" - "agent" - - "--cluster-name={{ .ClusterName }}" + - "--spoke-cluster-name={{ .ClusterName }}" - "--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig" {{ if gt (len .RegistrationFeatureGates) 0 }} {{range .RegistrationFeatureGates}} diff --git a/pkg/cmd/hub/operator.go b/pkg/cmd/hub/operator.go index c0886d395..fd2862040 100644 --- a/pkg/cmd/hub/operator.go +++ b/pkg/cmd/hub/operator.go @@ -5,7 +5,7 @@ import ( "github.com/openshift/library-go/pkg/controller/controllercmd" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager" "open-cluster-management.io/ocm/pkg/version" ) diff --git a/pkg/cmd/spoke/operator.go b/pkg/cmd/spoke/operator.go index be4499a76..d52d73529 100644 --- a/pkg/cmd/spoke/operator.go +++ b/pkg/cmd/spoke/operator.go @@ -5,7 +5,7 @@ import ( "github.com/openshift/library-go/pkg/controller/controllercmd" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet" "open-cluster-management.io/ocm/pkg/version" ) diff --git a/pkg/common/options/options.go b/pkg/common/options/options.go new file mode 100644 index 000000000..b282d6c0b --- /dev/null +++ b/pkg/common/options/options.go @@ -0,0 +1,64 @@ +package options + +import ( + "fmt" + "github.com/openshift/library-go/pkg/controller/controllercmd" + "github.com/spf13/pflag" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "strings" +) + +// AgentOptions is the common agent options 
+type AgentOptions struct {
+	SpokeKubeconfigFile string
+	SpokeClusterName    string
+	Burst               int
+	QPS                 float32
+}
+
+// NewAgentOptions returns the flags with default value set
+func NewAgentOptions() *AgentOptions {
+	return &AgentOptions{
+		QPS:   50,
+		Burst: 100,
+	}
+}
+
+func (o *AgentOptions) AddFlags(flags *pflag.FlagSet) {
+	flags.StringVar(&o.SpokeKubeconfigFile, "spoke-kubeconfig", o.SpokeKubeconfigFile,
+		"Location of kubeconfig file to connect to spoke cluster. If this is not set, will use '--kubeconfig' to build client to connect to the managed cluster.")
+	flags.StringVar(&o.SpokeClusterName, "spoke-cluster-name", o.SpokeClusterName, "Name of the spoke cluster.")
+	flags.StringVar(&o.SpokeClusterName, "cluster-name", o.SpokeClusterName,
+		"Name of the spoke cluster.")
+	flags.MarkDeprecated("cluster-name", "use spoke-cluster-name flag")
+	flags.Float32Var(&o.QPS, "spoke-kube-api-qps", o.QPS, "QPS to use while talking with apiserver on spoke cluster.")
+	flags.IntVar(&o.Burst, "spoke-kube-api-burst", o.Burst, "Burst to use while talking with apiserver on spoke cluster.")
+}
+
+// SpokeKubeConfig builds kubeconfig for the spoke/managed cluster
+func (o *AgentOptions) SpokeKubeConfig(controllerContext *controllercmd.ControllerContext) (*rest.Config, error) {
+	if o.SpokeKubeconfigFile == "" {
+		return controllerContext.KubeConfig, nil
+	}
+
+	spokeRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.SpokeKubeconfigFile)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load spoke kubeconfig from file %q: %w", o.SpokeKubeconfigFile, err)
+	}
+	spokeRestConfig.QPS = o.QPS
+	spokeRestConfig.Burst = o.Burst
+	return spokeRestConfig, nil
+}
+
+func (o *AgentOptions) Validate() error {
+	if o.SpokeClusterName == "" {
+		return fmt.Errorf("cluster name is empty")
+	}
+	if errMsgs := apimachineryvalidation.ValidateNamespaceName(o.SpokeClusterName, false); len(errMsgs) > 0 {
+		return fmt.Errorf("metadata.name format is not correct: %s", strings.Join(errMsgs, ","))
+	}
+
+	return nil
+}
diff --git a/pkg/registration-operator/certrotation/cabundle.go b/pkg/operator/certrotation/cabundle.go
similarity index 100%
rename from pkg/registration-operator/certrotation/cabundle.go
rename to pkg/operator/certrotation/cabundle.go
diff --git a/pkg/registration-operator/certrotation/cabundle_test.go b/pkg/operator/certrotation/cabundle_test.go
similarity index 100%
rename from pkg/registration-operator/certrotation/cabundle_test.go
rename to pkg/operator/certrotation/cabundle_test.go
diff --git a/pkg/registration-operator/certrotation/signer.go b/pkg/operator/certrotation/signer.go
similarity index 100%
rename from pkg/registration-operator/certrotation/signer.go
rename to pkg/operator/certrotation/signer.go
diff --git a/pkg/registration-operator/certrotation/signer_test.go b/pkg/operator/certrotation/signer_test.go
similarity index 100%
rename from pkg/registration-operator/certrotation/signer_test.go
rename to pkg/operator/certrotation/signer_test.go
diff --git a/pkg/registration-operator/certrotation/target.go b/pkg/operator/certrotation/target.go
similarity index 100%
rename from pkg/registration-operator/certrotation/target.go
rename to pkg/operator/certrotation/target.go
diff --git a/pkg/registration-operator/certrotation/target_test.go b/pkg/operator/certrotation/target_test.go
similarity index 100%
rename from pkg/registration-operator/certrotation/target_test.go
rename to pkg/operator/certrotation/target_test.go
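A minimal usage sketch of the new common AgentOptions added above, not part of the patch itself: it registers the shared flags, validates the cluster name, and falls back to the controller's kubeconfig when --spoke-kubeconfig is unset. The standalone main function, the "example-agent" flag-set name, and the in-cluster rest.Config stub are illustrative assumptions.

// Sketch only: consuming open-cluster-management.io/ocm/pkg/common/options.
package main

import (
	"fmt"
	"os"

	"github.com/openshift/library-go/pkg/controller/controllercmd"
	"github.com/spf13/pflag"
	"k8s.io/client-go/rest"

	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
)

func main() {
	// Defaults from NewAgentOptions: QPS=50, Burst=100.
	opts := commonoptions.NewAgentOptions()

	fs := pflag.NewFlagSet("example-agent", pflag.ExitOnError)
	// Registers --spoke-kubeconfig, --spoke-cluster-name, the deprecated
	// --cluster-name alias, --spoke-kube-api-qps and --spoke-kube-api-burst.
	opts.AddFlags(fs)
	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// The cluster name must be set and must be a valid namespace name.
	if err := opts.Validate(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// In the real agents the ControllerContext is supplied by controllercmd;
	// an in-cluster config stands in for it here (assumption for illustration).
	kubeConfig, err := rest.InClusterConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	ctrlCtx := &controllercmd.ControllerContext{KubeConfig: kubeConfig}

	// Uses --spoke-kubeconfig when provided (with QPS/Burst applied),
	// otherwise falls back to the controller's own kubeconfig.
	spokeConfig, err := opts.SpokeKubeConfig(ctrlCtx)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("managing spoke cluster %q via host %s\n", opts.SpokeClusterName, spokeConfig.Host)
}

diff --git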
a/pkg/registration-operator/helpers/error.go b/pkg/operator/helpers/error.go similarity index 100% rename from pkg/registration-operator/helpers/error.go rename to pkg/operator/helpers/error.go diff --git a/pkg/registration-operator/helpers/helpers.go b/pkg/operator/helpers/helpers.go similarity index 100% rename from pkg/registration-operator/helpers/helpers.go rename to pkg/operator/helpers/helpers.go diff --git a/pkg/registration-operator/helpers/helpers_test.go b/pkg/operator/helpers/helpers_test.go similarity index 100% rename from pkg/registration-operator/helpers/helpers_test.go rename to pkg/operator/helpers/helpers_test.go diff --git a/pkg/registration-operator/helpers/queuekey.go b/pkg/operator/helpers/queuekey.go similarity index 100% rename from pkg/registration-operator/helpers/queuekey.go rename to pkg/operator/helpers/queuekey.go diff --git a/pkg/registration-operator/helpers/queuekey_test.go b/pkg/operator/helpers/queuekey_test.go similarity index 100% rename from pkg/registration-operator/helpers/queuekey_test.go rename to pkg/operator/helpers/queuekey_test.go diff --git a/pkg/registration-operator/helpers/sa_syncer.go b/pkg/operator/helpers/sa_syncer.go similarity index 100% rename from pkg/registration-operator/helpers/sa_syncer.go rename to pkg/operator/helpers/sa_syncer.go diff --git a/pkg/registration-operator/helpers/sa_syncer_test.go b/pkg/operator/helpers/sa_syncer_test.go similarity index 100% rename from pkg/registration-operator/helpers/sa_syncer_test.go rename to pkg/operator/helpers/sa_syncer_test.go diff --git a/pkg/registration-operator/helpers/testing/assertion.go b/pkg/operator/helpers/testing/assertion.go similarity index 97% rename from pkg/registration-operator/helpers/testing/assertion.go rename to pkg/operator/helpers/testing/assertion.go index 9d8f12488..4fce8a1b4 100644 --- a/pkg/registration-operator/helpers/testing/assertion.go +++ b/pkg/operator/helpers/testing/assertion.go @@ -9,7 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" opratorapiv1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) func NamedCondition(name, reason string, status metav1.ConditionStatus) metav1.Condition { diff --git a/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go b/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go similarity index 98% rename from pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go rename to pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go index b4a1a2647..22301b97e 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller.go @@ -21,8 +21,8 @@ import ( operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1" operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1" operatorv1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/certrotation" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/certrotation" + 
"open-cluster-management.io/ocm/pkg/operator/helpers" ) const ( diff --git a/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller_test.go b/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller_test.go similarity index 99% rename from pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller_test.go rename to pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller_test.go index 1bc5f7a22..9ee4c9910 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/certrotationcontroller/certrotation_controller_test.go @@ -22,7 +22,7 @@ import ( operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions" operatorapiv1 "open-cluster-management.io/api/operator/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) const ( diff --git a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go similarity index 99% rename from pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go rename to pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go index c9c0016e0..23bc3cde1 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go @@ -33,7 +33,7 @@ import ( ocmfeature "open-cluster-management.io/api/feature" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1" ) diff --git a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go similarity index 99% rename from pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go rename to pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go index 0e5288587..36cc300d7 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go @@ -30,7 +30,7 @@ import ( fakemigrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/fake" migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) var ( 
diff --git a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go similarity index 97% rename from pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go rename to pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go index fd6c07754..a0326e152 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go @@ -17,8 +17,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/crdmanager" + "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/operators/crdmanager" migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1" ) diff --git a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go similarity index 98% rename from pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go rename to pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go index 25a41a2ea..09eda2cc5 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go @@ -17,7 +17,7 @@ import ( "k8s.io/client-go/kubernetes" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) var ( diff --git a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go similarity index 99% rename from pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go rename to pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go index c9a897ccf..6964b88b8 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go @@ -19,7 +19,7 @@ import ( "k8s.io/client-go/rest" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) var ( diff --git 
a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go similarity index 98% rename from pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go rename to pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go index 90dafe5d8..8199e9d27 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go @@ -17,7 +17,7 @@ import ( "k8s.io/client-go/kubernetes" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) var ( diff --git a/pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go similarity index 96% rename from pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go rename to pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go index ac256da38..219120d36 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go @@ -22,8 +22,8 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/klog/v2" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller" + "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller" ) var ( diff --git a/pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go similarity index 96% rename from pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go rename to pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go index f26d4d7b7..92d7064af 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go @@ -19,7 +19,7 @@ import ( operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions" operatorapiv1 "open-cluster-management.io/api/operator/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller" ) func TestSync(t *testing.T) { diff --git 
a/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go similarity index 99% rename from pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go rename to pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go index 85a2e1f41..a6e1863cc 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go @@ -27,7 +27,7 @@ import ( operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1" operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" migrationv1alpha1client "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1" ) diff --git a/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go similarity index 100% rename from pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go rename to pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go diff --git a/pkg/registration-operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go similarity index 98% rename from pkg/registration-operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go rename to pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go index a1922e788..c6ecde79b 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go @@ -15,7 +15,7 @@ import ( operatorv1client "open-cluster-management.io/api/client/operator/clientset/versioned/typed/operator/v1" operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1" operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/events" diff --git a/pkg/registration-operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go similarity index 98% rename from pkg/registration-operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go rename to 
pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go index d44cfd16e..f5d8f1d7d 100644 --- a/pkg/registration-operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go @@ -17,7 +17,7 @@ import ( operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions" operatorapiv1 "open-cluster-management.io/api/operator/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - testinghelper "open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing" + testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing" ) const testClusterManagerName = "testclustermanager" diff --git a/pkg/registration-operator/operators/clustermanager/options.go b/pkg/operator/operators/clustermanager/options.go similarity index 87% rename from pkg/registration-operator/operators/clustermanager/options.go rename to pkg/operator/operators/clustermanager/options.go index 04ebb95a2..4e2db0568 100644 --- a/pkg/registration-operator/operators/clustermanager/options.go +++ b/pkg/operator/operators/clustermanager/options.go @@ -7,18 +7,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" corev1informers "k8s.io/client-go/informers/core/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" "github.com/openshift/library-go/pkg/controller/controllercmd" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller" - clustermanagerstatuscontroller "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/statuscontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/certrotationcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller" + clustermanagerstatuscontroller "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/statuscontroller" ) type Options struct { diff --git a/pkg/registration-operator/operators/crdmanager/manager.go b/pkg/operator/operators/crdmanager/manager.go similarity index 100% rename from pkg/registration-operator/operators/crdmanager/manager.go rename to pkg/operator/operators/crdmanager/manager.go diff --git a/pkg/registration-operator/operators/crdmanager/manager_test.go b/pkg/operator/operators/crdmanager/manager_test.go similarity index 100% rename from 
pkg/registration-operator/operators/crdmanager/manager_test.go rename to pkg/operator/operators/crdmanager/manager_test.go diff --git a/pkg/registration-operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go similarity index 97% rename from pkg/registration-operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go rename to pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go index 42f4ab8fd..83f20b37b 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go +++ b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" coreinformer "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) const ( diff --git a/pkg/registration-operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go similarity index 100% rename from pkg/registration-operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go rename to pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go diff --git a/pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go rename to pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go index 1863288b2..a90836609 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go +++ b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go @@ -8,7 +8,7 @@ import ( operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1" operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/events" diff --git a/pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go similarity index 100% rename from pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go rename to pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/client_builder.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/client_builder.go similarity index 100% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/client_builder.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/client_builder.go diff --git 
a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go index c7e8c036b..523a83242 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go @@ -27,7 +27,7 @@ import ( operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) type klusterletCleanupController struct { diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go index 4ae163544..f9d37e204 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go @@ -11,7 +11,7 @@ import ( clienttesting "k8s.io/client-go/testing" "k8s.io/klog/v2" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) // TestSyncDelete test cleanup hub deploy diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go index b21089e82..17b271070 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go @@ -29,7 +29,7 @@ import ( workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" operatorapiv1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) const ( diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go rename to 
pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go index 1bbb0f579..f61a41e43 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go @@ -34,8 +34,8 @@ import ( operatorapiv1 "open-cluster-management.io/api/operator/v1" workapiv1 "open-cluster-management.io/api/work/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" - testinghelper "open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing" + "open-cluster-management.io/ocm/pkg/operator/helpers" + testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing" ) type testController struct { diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go similarity index 97% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go index 0040dc9a6..1ae157179 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go @@ -17,8 +17,8 @@ import ( "k8s.io/apimachinery/pkg/util/version" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/crdmanager" + "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/operators/crdmanager" ) var ( diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go index cd280c71e..9f945ce89 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go @@ -20,7 +20,7 @@ import ( operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) var ( diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go similarity index 98% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go index 
8910dfa16..333c46515 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go @@ -18,7 +18,7 @@ import ( "k8s.io/client-go/kubernetes" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) var ( diff --git a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go rename to pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go index 25c51cff0..fac6d97ec 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go @@ -18,7 +18,7 @@ import ( "k8s.io/client-go/kubernetes" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/manifests" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) // runtimeReconcile ensure all runtime of klusterlet is applied diff --git a/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go similarity index 99% rename from pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go rename to pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go index 1a03888d3..622b80cd3 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go @@ -23,7 +23,7 @@ import ( operatorv1client "open-cluster-management.io/api/client/operator/clientset/versioned/typed/operator/v1" operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1" operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) // SSARReSyncTime is exposed so that integration tests can crank up the controller sync speed. 
diff --git a/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go similarity index 98% rename from pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go rename to pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go index bc41b0374..d1e18e79c 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go @@ -23,8 +23,8 @@ import ( operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions" operatorapiv1 "open-cluster-management.io/api/operator/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" - testinghelper "open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing" + "open-cluster-management.io/ocm/pkg/operator/helpers" + testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing" ) type testController struct { diff --git a/pkg/registration-operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go similarity index 98% rename from pkg/registration-operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go rename to pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go index 59ac0db15..30f27bdb9 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go @@ -18,7 +18,7 @@ import ( operatorv1client "open-cluster-management.io/api/client/operator/clientset/versioned/typed/operator/v1" operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1" operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) type klusterletStatusController struct { diff --git a/pkg/registration-operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go similarity index 98% rename from pkg/registration-operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go rename to pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go index 37f886030..c2428a28a 100644 --- a/pkg/registration-operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go @@ -16,7 +16,7 @@ import ( operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions" operatorapiv1 "open-cluster-management.io/api/operator/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - testinghelper 
"open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing" + testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing" ) type testController struct { diff --git a/pkg/registration-operator/operators/klusterlet/options.go b/pkg/operator/operators/klusterlet/options.go similarity index 88% rename from pkg/registration-operator/operators/klusterlet/options.go rename to pkg/operator/operators/klusterlet/options.go index a74b3294d..3e85aefc4 100644 --- a/pkg/registration-operator/operators/klusterlet/options.go +++ b/pkg/operator/operators/klusterlet/options.go @@ -14,11 +14,11 @@ import ( operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/addonsecretcontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/statuscontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/klusterletcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/ssarcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/statuscontroller" ) // defaultSpokeComponentNamespace is the default namespace in which the operator is deployed diff --git a/pkg/registration/spoke/spokeagent.go b/pkg/registration/spoke/spokeagent.go index 14182f8ee..892a55467 100644 --- a/pkg/registration/spoke/spokeagent.go +++ b/pkg/registration/spoke/spokeagent.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "io/ioutil" "os" "path" "time" @@ -16,6 +15,7 @@ import ( addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/pkg/registration/clientcert" "open-cluster-management.io/ocm/pkg/registration/helpers" @@ -54,8 +54,8 @@ var AddOnLeaseControllerSyncInterval = 30 * time.Second // SpokeAgentOptions holds configuration for spoke cluster agent type SpokeAgentOptions struct { + AgentOptions *commonoptions.AgentOptions ComponentNamespace string - ClusterName string AgentName string BootstrapKubeconfig string HubKubeconfigSecret string @@ -63,13 +63,13 @@ type SpokeAgentOptions struct { SpokeExternalServerURLs []string ClusterHealthCheckPeriod time.Duration MaxCustomClusterClaims int - SpokeKubeconfig string ClientCertExpirationSeconds int32 } // NewSpokeAgentOptions returns a SpokeAgentOptions func NewSpokeAgentOptions() *SpokeAgentOptions { return &SpokeAgentOptions{ + AgentOptions: 
commonoptions.NewAgentOptions(), HubKubeconfigSecret: "hub-kubeconfig-secret", HubKubeconfigDir: "/spoke/hub-kubeconfig", ClusterHealthCheckPeriod: 1 * time.Minute, @@ -116,7 +116,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext // load spoke client config and create spoke clients, // the registration agent may not running in the spoke/managed cluster. - spokeClientConfig, err := o.spokeKubeConfig(controllerContext) + spokeClientConfig, err := o.AgentOptions.SpokeKubeConfig(controllerContext) if err != nil { return err } @@ -135,7 +135,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext klog.Fatal(err) } - klog.Infof("Cluster name is %q and agent name is %q", o.ClusterName, o.AgentName) + klog.Infof("Cluster name is %q and agent name is %q", o.AgentOptions.SpokeClusterName, o.AgentName) // create shared informer factory for spoke cluster spokeKubeInformerFactory := informers.NewSharedInformerFactory(spokeKubeClient, 10*time.Minute) @@ -165,7 +165,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext // start a SpokeClusterCreatingController to make sure there is a spoke cluster on hub cluster spokeClusterCreatingController := managedcluster.NewManagedClusterCreatingController( - o.ClusterName, o.SpokeExternalServerURLs, + o.AgentOptions.SpokeClusterName, o.SpokeExternalServerURLs, spokeClusterCABundle, bootstrapClusterClient, controllerContext.EventRecorder, @@ -211,9 +211,9 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext return err } - controllerName := fmt.Sprintf("BootstrapClientCertController@cluster:%s", o.ClusterName) + controllerName := fmt.Sprintf("BootstrapClientCertController@cluster:%s", o.AgentOptions.SpokeClusterName) clientCertForHubController := managedcluster.NewClientCertForHubController( - o.ClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret, + o.AgentOptions.SpokeClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret, kubeconfigData, // store the secret in the cluster where the agent pod runs bootstrapNamespacedManagementKubeInformerFactory.Core().V1().Secrets(), @@ -269,17 +269,17 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext hubKubeClient, 10*time.Minute, informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) { - listOptions.LabelSelector = fmt.Sprintf("%s=%s", clusterv1.ClusterNameLabelKey, o.ClusterName) + listOptions.LabelSelector = fmt.Sprintf("%s=%s", clusterv1.ClusterNameLabelKey, o.AgentOptions.SpokeClusterName) }), ) addOnInformerFactory := addoninformers.NewSharedInformerFactoryWithOptions( - addOnClient, 10*time.Minute, addoninformers.WithNamespace(o.ClusterName)) + addOnClient, 10*time.Minute, addoninformers.WithNamespace(o.AgentOptions.SpokeClusterName)) // create a cluster informer factory with name field selector because we just need to handle the current spoke cluster hubClusterInformerFactory := clusterv1informers.NewSharedInformerFactoryWithOptions( hubClusterClient, 10*time.Minute, clusterv1informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) { - listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", o.ClusterName).String() + listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", o.AgentOptions.SpokeClusterName).String() }), ) @@ -298,15 +298,15 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext } // create another ClientCertForHubController for 
client certificate rotation - controllerName := fmt.Sprintf("ClientCertController@cluster:%s", o.ClusterName) + controllerName := fmt.Sprintf("ClientCertController@cluster:%s", o.AgentOptions.SpokeClusterName) clientCertForHubController := managedcluster.NewClientCertForHubController( - o.ClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret, + o.AgentOptions.SpokeClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret, kubeconfigData, namespacedManagementKubeInformerFactory.Core().V1().Secrets(), csrControl, o.ClientCertExpirationSeconds, managementKubeClient, - managedcluster.GenerateStatusUpdater(hubClusterClient, o.ClusterName), + managedcluster.GenerateStatusUpdater(hubClusterClient, o.AgentOptions.SpokeClusterName), controllerContext.EventRecorder, controllerName, ) @@ -316,7 +316,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext // create ManagedClusterJoiningController to reconcile instances of ManagedCluster on the managed cluster managedClusterJoiningController := managedcluster.NewManagedClusterJoiningController( - o.ClusterName, + o.AgentOptions.SpokeClusterName, hubClusterClient, hubClusterInformerFactory.Cluster().V1().ManagedClusters(), controllerContext.EventRecorder, @@ -324,7 +324,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext // create ManagedClusterLeaseController to keep the spoke cluster heartbeat managedClusterLeaseController := managedcluster.NewManagedClusterLeaseController( - o.ClusterName, + o.AgentOptions.SpokeClusterName, hubKubeClient, hubClusterInformerFactory.Cluster().V1().ManagedClusters(), controllerContext.EventRecorder, @@ -332,7 +332,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext // create NewManagedClusterStatusController to update the spoke cluster status managedClusterHealthCheckController := managedcluster.NewManagedClusterStatusController( - o.ClusterName, + o.AgentOptions.SpokeClusterName, hubClusterClient, hubClusterInformerFactory.Cluster().V1().ManagedClusters(), spokeKubeClient.Discovery(), @@ -350,7 +350,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) { // create managedClusterClaimController to sync cluster claims managedClusterClaimController = managedcluster.NewManagedClusterClaimController( - o.ClusterName, + o.AgentOptions.SpokeClusterName, o.MaxCustomClusterClaims, hubClusterClient, hubClusterInformerFactory.Cluster().V1().ManagedClusters(), @@ -363,7 +363,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext var addOnRegistrationController factory.Controller if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.AddonManagement) { addOnLeaseController = addon.NewManagedClusterAddOnLeaseController( - o.ClusterName, + o.AgentOptions.SpokeClusterName, addOnClient, addOnInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(), hubKubeClient.CoordinationV1(), @@ -374,7 +374,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext ) addOnRegistrationController = addon.NewAddOnRegistrationController( - o.ClusterName, + o.AgentOptions.SpokeClusterName, o.AgentName, kubeconfigData, addOnClient, @@ -412,16 +412,13 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext // AddFlags registers flags for Agent func (o *SpokeAgentOptions) AddFlags(fs *pflag.FlagSet) { 
features.DefaultSpokeRegistrationMutableFeatureGate.AddFlag(fs) - fs.StringVar(&o.ClusterName, "cluster-name", o.ClusterName, - "If non-empty, will use as cluster name instead of generated random name.") + o.AgentOptions.AddFlags(fs) fs.StringVar(&o.BootstrapKubeconfig, "bootstrap-kubeconfig", o.BootstrapKubeconfig, "The path of the kubeconfig file for agent bootstrap.") fs.StringVar(&o.HubKubeconfigSecret, "hub-kubeconfig-secret", o.HubKubeconfigSecret, "The name of secret in component namespace storing kubeconfig for hub.") fs.StringVar(&o.HubKubeconfigDir, "hub-kubeconfig-dir", o.HubKubeconfigDir, "The mount path of hub-kubeconfig-secret in the container.") - fs.StringVar(&o.SpokeKubeconfig, "spoke-kubeconfig", o.SpokeKubeconfig, - "The path of the kubeconfig file for managed/spoke cluster. If this is not set, will use '--kubeconfig' to build client to connect to the managed cluster.") fs.StringArrayVar(&o.SpokeExternalServerURLs, "spoke-external-server-urls", o.SpokeExternalServerURLs, "A list of reachable spoke cluster api server URLs for hub cluster.") fs.DurationVar(&o.ClusterHealthCheckPeriod, "cluster-healthcheck-period", o.ClusterHealthCheckPeriod, @@ -438,8 +435,8 @@ func (o *SpokeAgentOptions) Validate() error { return errors.New("bootstrap-kubeconfig is required") } - if o.ClusterName == "" { - return errors.New("cluster name is empty") + if err := o.AgentOptions.Validate(); err != nil { + return err } if o.AgentName == "" { @@ -469,7 +466,7 @@ func (o *SpokeAgentOptions) Validate() error { // Complete fills in missing values. func (o *SpokeAgentOptions) Complete(coreV1Client corev1client.CoreV1Interface, ctx context.Context, recorder events.Recorder) error { // get component namespace of spoke agent - nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") if err != nil { o.ComponentNamespace = defaultSpokeComponentNamespace } else { @@ -484,7 +481,7 @@ func (o *SpokeAgentOptions) Complete(coreV1Client corev1client.CoreV1Interface, } // load or generate cluster/agent names - o.ClusterName, o.AgentName = o.getOrGenerateClusterAgentNames() + o.AgentOptions.SpokeClusterName, o.AgentName = o.getOrGenerateClusterAgentNames() return nil } @@ -523,7 +520,7 @@ func (o *SpokeAgentOptions) hasValidHubClientConfig() (bool, error) { } certPath := path.Join(o.HubKubeconfigDir, clientcert.TLSCertFile) - certData, err := ioutil.ReadFile(path.Clean(certPath)) + certData, err := os.ReadFile(path.Clean(certPath)) if err != nil { klog.V(4).Infof("Unable to load TLS cert file %q", certPath) return false, nil @@ -534,10 +531,10 @@ func (o *SpokeAgentOptions) hasValidHubClientConfig() (bool, error) { if err != nil { return false, nil } - if clusterName != o.ClusterName || agentName != o.AgentName { + if clusterName != o.AgentOptions.SpokeClusterName || agentName != o.AgentName { klog.V(4).Infof("Certificate in file %q is issued for agent %q instead of %q", certPath, fmt.Sprintf("%s:%s", clusterName, agentName), - fmt.Sprintf("%s:%s", o.ClusterName, o.AgentName)) + fmt.Sprintf("%s:%s", o.AgentOptions.SpokeClusterName, o.AgentName)) return false, nil } @@ -560,19 +557,19 @@ func (o *SpokeAgentOptions) getOrGenerateClusterAgentNames() (string, string) { // try to load cluster/agent name from tls certification var clusterNameInCert, agentNameInCert string certPath := path.Join(o.HubKubeconfigDir, clientcert.TLSCertFile) - certData, certErr := ioutil.ReadFile(path.Clean(certPath)) 
+ certData, certErr := os.ReadFile(path.Clean(certPath)) if certErr == nil { clusterNameInCert, agentNameInCert, _ = managedcluster.GetClusterAgentNamesFromCertificate(certData) } - clusterName := o.ClusterName + clusterName := o.AgentOptions.SpokeClusterName // if cluster name is not specified with input argument, try to load it from file if clusterName == "" { // TODO, read cluster name from openshift struct if the spoke agent is running in an openshift cluster // and then load the cluster name from the mounted secret clusterNameFilePath := path.Join(o.HubKubeconfigDir, clientcert.ClusterNameFile) - clusterNameBytes, err := ioutil.ReadFile(path.Clean(clusterNameFilePath)) + clusterNameBytes, err := os.ReadFile(path.Clean(clusterNameFilePath)) switch { case len(clusterNameInCert) > 0: // use cluster name loaded from the tls certification @@ -591,7 +588,7 @@ func (o *SpokeAgentOptions) getOrGenerateClusterAgentNames() (string, string) { // try to load agent name from the mounted secret agentNameFilePath := path.Join(o.HubKubeconfigDir, clientcert.AgentNameFile) - agentNameBytes, err := ioutil.ReadFile(path.Clean(agentNameFilePath)) + agentNameBytes, err := os.ReadFile(path.Clean(agentNameFilePath)) var agentName string switch { case len(agentNameInCert) > 0: @@ -619,22 +616,9 @@ func (o *SpokeAgentOptions) getSpokeClusterCABundle(kubeConfig *rest.Config) ([] if kubeConfig.CAData != nil { return kubeConfig.CAData, nil } - data, err := ioutil.ReadFile(kubeConfig.CAFile) + data, err := os.ReadFile(kubeConfig.CAFile) if err != nil { return nil, err } return data, nil } - -// spokeKubeConfig builds kubeconfig for the spoke/managed cluster -func (o *SpokeAgentOptions) spokeKubeConfig(controllerContext *controllercmd.ControllerContext) (*rest.Config, error) { - if o.SpokeKubeconfig == "" { - return controllerContext.KubeConfig, nil - } - - config, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.SpokeKubeconfig) - if err != nil { - return nil, fmt.Errorf("unable to load spoke kubeconfig from file %q: %w", o.SpokeKubeconfig, err) - } - return config, nil -} diff --git a/pkg/registration/spoke/spokeagent_test.go b/pkg/registration/spoke/spokeagent_test.go index 595243cde..22a2d4c22 100644 --- a/pkg/registration/spoke/spokeagent_test.go +++ b/pkg/registration/spoke/spokeagent_test.go @@ -3,7 +3,7 @@ package spoke import ( "bytes" "context" - "io/ioutil" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "os" "path" "testing" @@ -23,7 +23,7 @@ import ( func TestComplete(t *testing.T) { // get component namespace var componentNamespace string - nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") if err != nil { componentNamespace = defaultSpokeComponentNamespace } else { @@ -102,14 +102,16 @@ func TestComplete(t *testing.T) { kubeClient := kubefake.NewSimpleClientset(objects...) 
// create a tmp dir to dump hub kubeconfig - dir, err := ioutil.TempDir("", "hub-kubeconfig") + dir, err := os.MkdirTemp("", "hub-kubeconfig") if err != nil { t.Error("unable to create a tmp dir") } defer os.RemoveAll(dir) options := &SpokeAgentOptions{ - ClusterName: c.clusterName, + AgentOptions: &commonoptions.AgentOptions{ + SpokeClusterName: c.clusterName, + }, HubKubeconfigSecret: "hub-kubeconfig-secret", HubKubeconfigDir: dir, } @@ -120,14 +122,14 @@ func TestComplete(t *testing.T) { if options.ComponentNamespace == "" { t.Error("component namespace should not be empty") } - if options.ClusterName == "" { + if options.AgentOptions.SpokeClusterName == "" { t.Error("cluster name should not be empty") } if options.AgentName == "" { t.Error("agent name should not be empty") } - if len(c.expectedClusterName) > 0 && options.ClusterName != c.expectedClusterName { - t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, options.ClusterName) + if len(c.expectedClusterName) > 0 && options.AgentOptions.SpokeClusterName != c.expectedClusterName { + t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, options.AgentOptions.SpokeClusterName) } if len(c.expectedAgentName) > 0 && options.AgentName != c.expectedAgentName { t.Errorf("expect agent name %q but got %q", c.expectedAgentName, options.AgentName) @@ -139,7 +141,7 @@ func TestComplete(t *testing.T) { func TestValidate(t *testing.T) { defaultCompletedOptions := NewSpokeAgentOptions() defaultCompletedOptions.BootstrapKubeconfig = "/spoke/bootstrap/kubeconfig" - defaultCompletedOptions.ClusterName = "testcluster" + defaultCompletedOptions.AgentOptions.SpokeClusterName = "testcluster" defaultCompletedOptions.AgentName = "testagent" cases := []struct { @@ -154,19 +156,21 @@ func TestValidate(t *testing.T) { }, { name: "no cluster name", - options: &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig"}, + options: &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", AgentOptions: &commonoptions.AgentOptions{}}, expectedErr: "cluster name is empty", }, { name: "no agent name", - options: &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", ClusterName: "testcluster"}, + options: &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", AgentOptions: &commonoptions.AgentOptions{SpokeClusterName: "testcluster"}}, expectedErr: "agent name is empty", }, { name: "invalid external server URLs", options: &SpokeAgentOptions{ - BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", - ClusterName: "testcluster", + BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", + AgentOptions: &commonoptions.AgentOptions{ + SpokeClusterName: "testcluster", + }, AgentName: "testagent", SpokeExternalServerURLs: []string{"https://127.0.0.1:64433", "http://127.0.0.1:8080"}, }, @@ -175,8 +179,10 @@ func TestValidate(t *testing.T) { { name: "invalid cluster healthcheck period", options: &SpokeAgentOptions{ - BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", - ClusterName: "testcluster", + BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", + AgentOptions: &commonoptions.AgentOptions{ + SpokeClusterName: "testcluster", + }, AgentName: "testagent", ClusterHealthCheckPeriod: 0, }, @@ -190,12 +196,14 @@ func TestValidate(t *testing.T) { { name: "default completed options", options: &SpokeAgentOptions{ - HubKubeconfigSecret: "hub-kubeconfig-secret", - HubKubeconfigDir: "/spoke/hub-kubeconfig", - ClusterHealthCheckPeriod: 1 * time.Minute, - MaxCustomClusterClaims: 20, - BootstrapKubeconfig: 
"/spoke/bootstrap/kubeconfig", - ClusterName: "testcluster", + HubKubeconfigSecret: "hub-kubeconfig-secret", + HubKubeconfigDir: "/spoke/hub-kubeconfig", + ClusterHealthCheckPeriod: 1 * time.Minute, + MaxCustomClusterClaims: 20, + BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", + AgentOptions: &commonoptions.AgentOptions{ + SpokeClusterName: "testcluster", + }, AgentName: "testagent", ClientCertExpirationSeconds: 599, }, @@ -204,12 +212,14 @@ func TestValidate(t *testing.T) { { name: "default completed options", options: &SpokeAgentOptions{ - HubKubeconfigSecret: "hub-kubeconfig-secret", - HubKubeconfigDir: "/spoke/hub-kubeconfig", - ClusterHealthCheckPeriod: 1 * time.Minute, - MaxCustomClusterClaims: 20, - BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", - ClusterName: "testcluster", + HubKubeconfigSecret: "hub-kubeconfig-secret", + HubKubeconfigDir: "/spoke/hub-kubeconfig", + ClusterHealthCheckPeriod: 1 * time.Minute, + MaxCustomClusterClaims: 20, + BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", + AgentOptions: &commonoptions.AgentOptions{ + SpokeClusterName: "testcluster", + }, AgentName: "testagent", ClientCertExpirationSeconds: 600, }, @@ -225,7 +235,7 @@ func TestValidate(t *testing.T) { } func TestHasValidHubClientConfig(t *testing.T) { - tempDir, err := ioutil.TempDir("", "testvalidhubclientconfig") + tempDir, err := os.MkdirTemp("", "testvalidhubclientconfig") if err != nil { t.Errorf("unexpected error: %v", err) } @@ -292,7 +302,9 @@ func TestHasValidHubClientConfig(t *testing.T) { } options := &SpokeAgentOptions{ - ClusterName: c.clusterName, + AgentOptions: &commonoptions.AgentOptions{ + SpokeClusterName: c.clusterName, + }, AgentName: c.agentName, HubKubeconfigDir: tempDir, } @@ -308,7 +320,7 @@ func TestHasValidHubClientConfig(t *testing.T) { } func TestGetOrGenerateClusterAgentNames(t *testing.T) { - tempDir, err := ioutil.TempDir("", "testgetorgenerateclusteragentnames") + tempDir, err := os.MkdirTemp("", "testgetorgenerateclusteragentnames") if err != nil { t.Errorf("unexpected error: %v", err) } @@ -322,12 +334,12 @@ func TestGetOrGenerateClusterAgentNames(t *testing.T) { }{ { name: "cluster name is specified", - options: &SpokeAgentOptions{ClusterName: "cluster0"}, + options: &SpokeAgentOptions{AgentOptions: &commonoptions.AgentOptions{SpokeClusterName: "cluster0"}}, expectedClusterName: "cluster0", }, { name: "cluster name and agent name are in file", - options: &SpokeAgentOptions{HubKubeconfigDir: tempDir}, + options: &SpokeAgentOptions{HubKubeconfigDir: tempDir, AgentOptions: &commonoptions.AgentOptions{}}, expectedClusterName: "cluster1", expectedAgentName: "agent1", }, @@ -356,7 +368,7 @@ func TestGetOrGenerateClusterAgentNames(t *testing.T) { } func TestGetSpokeClusterCABundle(t *testing.T) { - tempDir, err := ioutil.TempDir("", "testgetspokeclustercabundle") + tempDir, err := os.MkdirTemp("", "testgetspokeclustercabundle") if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/pkg/work/spoke/spokeagent.go b/pkg/work/spoke/spokeagent.go index 05e24ea73..7f0deaece 100644 --- a/pkg/work/spoke/spokeagent.go +++ b/pkg/work/spoke/spokeagent.go @@ -2,29 +2,26 @@ package spoke import ( "context" - "fmt" - "time" - - "open-cluster-management.io/ocm/pkg/features" - + "github.com/openshift/library-go/pkg/controller/controllercmd" + "github.com/spf13/cobra" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + 
"k8s.io/client-go/tools/clientcmd" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workinformers "open-cluster-management.io/api/client/work/informers/externalversions" ocmfeature "open-cluster-management.io/api/feature" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" + "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/pkg/work/helper" "open-cluster-management.io/ocm/pkg/work/spoke/auth" "open-cluster-management.io/ocm/pkg/work/spoke/controllers/appliedmanifestcontroller" "open-cluster-management.io/ocm/pkg/work/spoke/controllers/finalizercontroller" "open-cluster-management.io/ocm/pkg/work/spoke/controllers/manifestcontroller" "open-cluster-management.io/ocm/pkg/work/spoke/controllers/statuscontroller" - - "github.com/openshift/library-go/pkg/controller/controllercmd" - "github.com/spf13/cobra" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "time" ) const ( @@ -41,21 +38,17 @@ const ( // WorkloadAgentOptions defines the flags for workload agent type WorkloadAgentOptions struct { + AgentOptions *commonoptions.AgentOptions HubKubeconfigFile string - SpokeKubeconfigFile string - SpokeClusterName string AgentID string - Burst int StatusSyncInterval time.Duration AppliedManifestWorkEvictionGracePeriod time.Duration - QPS float32 } // NewWorkloadAgentOptions returns the flags with default value set func NewWorkloadAgentOptions() *WorkloadAgentOptions { return &WorkloadAgentOptions{ - QPS: 50, - Burst: 100, + AgentOptions: commonoptions.NewAgentOptions(), StatusSyncInterval: 10 * time.Second, AppliedManifestWorkEvictionGracePeriod: 10 * time.Minute, } @@ -64,15 +57,11 @@ func NewWorkloadAgentOptions() *WorkloadAgentOptions { // AddFlags register and binds the default flags func (o *WorkloadAgentOptions) AddFlags(cmd *cobra.Command) { flags := cmd.Flags() + o.AgentOptions.AddFlags(flags) features.DefaultSpokeWorkMutableFeatureGate.AddFlag(flags) // This command only supports reading from config flags.StringVar(&o.HubKubeconfigFile, "hub-kubeconfig", o.HubKubeconfigFile, "Location of kubeconfig file to connect to hub cluster.") - flags.StringVar(&o.SpokeKubeconfigFile, "spoke-kubeconfig", o.SpokeKubeconfigFile, - "Location of kubeconfig file to connect to spoke cluster. 
If this is not set, will use '--kubeconfig' to build client to connect to the managed cluster.") - flags.StringVar(&o.SpokeClusterName, "spoke-cluster-name", o.SpokeClusterName, "Name of spoke cluster.") flags.StringVar(&o.AgentID, "agent-id", o.AgentID, "ID of the work agent to identify the work this agent should handle after restart/recovery.") - flags.Float32Var(&o.QPS, "spoke-kube-api-qps", o.QPS, "QPS to use while talking with apiserver on spoke cluster.") - flags.IntVar(&o.Burst, "spoke-kube-api-burst", o.Burst, "Burst to use while talking with apiserver on spoke cluster.") flags.DurationVar(&o.StatusSyncInterval, "status-sync-interval", o.StatusSyncInterval, "Interval to sync resource status to hub.") flags.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period", o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction") } @@ -96,17 +85,15 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC return err } // Only watch the cluster namespace on hub - workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute, workinformers.WithNamespace(o.SpokeClusterName)) + workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute, workinformers.WithNamespace(o.AgentOptions.SpokeClusterName)) // load spoke client config and create spoke clients, // the work agent may not running in the spoke/managed cluster. - spokeRestConfig, err := o.spokeKubeConfig(controllerContext) + spokeRestConfig, err := o.AgentOptions.SpokeKubeConfig(controllerContext) if err != nil { return err } - spokeRestConfig.QPS = o.QPS - spokeRestConfig.Burst = o.Burst spokeDynamicClient, err := dynamic.NewForConfig(spokeRestConfig) if err != nil { return err @@ -138,7 +125,7 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC spokeRestConfig, spokeKubeClient, workInformerFactory.Work().V1().ManifestWorks(), - o.SpokeClusterName, + o.AgentOptions.SpokeClusterName, controllerContext.EventRecorder, restMapper, ).NewExecutorValidator(ctx, features.DefaultSpokeWorkMutableFeatureGate.Enabled(ocmfeature.ExecutorValidatingCaches)) @@ -148,9 +135,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC spokeDynamicClient, spokeKubeClient, spokeAPIExtensionClient, - hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName), + hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName), workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName), + workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), hubhash, agentID, @@ -159,9 +146,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC ) addFinalizerController := finalizercontroller.NewAddFinalizerController( controllerContext.EventRecorder, - hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName), + hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName), workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName), + workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName), ) appliedManifestWorkFinalizeController := 
finalizercontroller.NewAppliedManifestWorkFinalizeController( controllerContext.EventRecorder, @@ -172,9 +159,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC ) manifestWorkFinalizeController := finalizercontroller.NewManifestWorkFinalizeController( controllerContext.EventRecorder, - hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName), + hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName), workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName), + workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), hubhash, @@ -182,7 +169,7 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC unmanagedAppliedManifestWorkController := finalizercontroller.NewUnManagedAppliedWorkController( controllerContext.EventRecorder, workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName), + workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), o.AppliedManifestWorkEvictionGracePeriod, @@ -191,9 +178,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC appliedManifestWorkController := appliedmanifestcontroller.NewAppliedManifestWorkController( controllerContext.EventRecorder, spokeDynamicClient, - hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName), + hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName), workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName), + workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), hubhash, @@ -201,9 +188,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC availableStatusController := statuscontroller.NewAvailableStatusController( controllerContext.EventRecorder, spokeDynamicClient, - hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName), + hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName), workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName), + workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName), o.StatusSyncInterval, ) @@ -219,16 +206,3 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC <-ctx.Done() return nil } - -// spokeKubeConfig builds kubeconfig for the spoke/managed cluster -func (o *WorkloadAgentOptions) spokeKubeConfig(controllerContext *controllercmd.ControllerContext) (*rest.Config, error) { - if o.SpokeKubeconfigFile == "" { - return controllerContext.KubeConfig, nil - } - - spokeRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.SpokeKubeconfigFile) - if err != nil { - return nil, fmt.Errorf("unable to load spoke kubeconfig from file %q: %w", o.SpokeKubeconfigFile, err) - } - return spokeRestConfig, nil -} diff --git a/test/e2e/common.go 
b/test/e2e/common.go index 668e48744..96a2febd2 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -40,7 +40,7 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" operatorapiv1 "open-cluster-management.io/api/operator/v1" workapiv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) type Tester struct { diff --git a/test/integration/operator/clustermanager_hosted_test.go b/test/integration/operator/clustermanager_hosted_test.go index 4f6aab5f5..12622524c 100644 --- a/test/integration/operator/clustermanager_hosted_test.go +++ b/test/integration/operator/clustermanager_hosted_test.go @@ -17,7 +17,7 @@ import ( "k8s.io/client-go/util/cert" operatorapiv1 "open-cluster-management.io/api/operator/v1" v1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) func updateDeploymentStatus(kubeClient kubernetes.Interface, namespace, deploymentName string) { diff --git a/test/integration/operator/clustermanager_test.go b/test/integration/operator/clustermanager_test.go index a47817eb4..c1a9d4dfe 100644 --- a/test/integration/operator/clustermanager_test.go +++ b/test/integration/operator/clustermanager_test.go @@ -20,9 +20,9 @@ import ( "github.com/openshift/library-go/pkg/controller/controllercmd" operatorapiv1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager" - certrotation "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller" + "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager" + certrotation "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/certrotationcontroller" ) func startHubOperator(ctx context.Context, mode v1.InstallMode) { diff --git a/test/integration/operator/doc.go b/test/integration/operator/doc.go index ac3f3dbc3..bf633227d 100644 --- a/test/integration/operator/doc.go +++ b/test/integration/operator/doc.go @@ -1,4 +1,4 @@ -// Package integration provides integration tests for open-cluster-management registration-operator, the test cases include +// Package integration provides integration tests for open-cluster-management operator, the test cases include // - deploy/update/remove the cluster manager // - deploy/update/remove the klusterlet package operator diff --git a/test/integration/operator/integration_suite_test.go b/test/integration/operator/integration_suite_test.go index cd73b4836..41d532f86 100644 --- a/test/integration/operator/integration_suite_test.go +++ b/test/integration/operator/integration_suite_test.go @@ -24,8 +24,8 @@ import ( operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" operatorapiv1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/ssarcontroller" ) func 
TestIntegration(t *testing.T) { @@ -73,7 +73,7 @@ var _ = ginkgo.BeforeSuite(func() { var err error - // install registration-operator CRDs and start a local kube-apiserver + // install operator CRDs and start a local kube-apiserver testEnv = &envtest.Environment{ ErrorIfCRDPathMissing: true, CRDDirectoryPaths: []string{ diff --git a/test/integration/operator/klusterlet_hosted_test.go b/test/integration/operator/klusterlet_hosted_test.go index 6a0746b85..1c79ce46c 100644 --- a/test/integration/operator/klusterlet_hosted_test.go +++ b/test/integration/operator/klusterlet_hosted_test.go @@ -9,7 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" operatorapiv1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/helpers" "open-cluster-management.io/ocm/test/integration/util" ) diff --git a/test/integration/operator/klusterlet_test.go b/test/integration/operator/klusterlet_test.go index f494fbb39..3e2c74daf 100644 --- a/test/integration/operator/klusterlet_test.go +++ b/test/integration/operator/klusterlet_test.go @@ -18,8 +18,8 @@ import ( "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/rest" operatorapiv1 "open-cluster-management.io/api/operator/v1" - "open-cluster-management.io/ocm/pkg/registration-operator/helpers" - "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet" + "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet" "open-cluster-management.io/ocm/test/integration/util" ) diff --git a/test/integration/registration/addon_lease_test.go b/test/integration/registration/addon_lease_test.go index 1d56d0bb3..45c9f080f 100644 --- a/test/integration/registration/addon_lease_test.go +++ b/test/integration/registration/addon_lease_test.go @@ -3,6 +3,7 @@ package registration_test import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "time" @@ -167,13 +168,15 @@ var _ = ginkgo.Describe("Addon Lease Resync", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName + cancel = runAgent("addontest", agentOptions, spokeCfg) }) diff --git a/test/integration/registration/addon_registration_test.go b/test/integration/registration/addon_registration_test.go index 7aac30cd6..a887f1210 100644 --- a/test/integration/registration/addon_registration_test.go +++ b/test/integration/registration/addon_registration_test.go @@ -3,6 +3,7 @@ package registration_test import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "reflect" @@ -41,13 +42,15 @@ var _ = ginkgo.Describe("Addon Registration", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, 
ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName + // run registration agent cancel = runAgent("addontest", agentOptions, spokeCfg) }) diff --git a/test/integration/registration/certificate_rotation_test.go b/test/integration/registration/certificate_rotation_test.go index d8bd9e580..f0ccaa5ce 100644 --- a/test/integration/registration/certificate_rotation_test.go +++ b/test/integration/registration/certificate_rotation_test.go @@ -1,6 +1,7 @@ package registration_test import ( + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "time" @@ -20,13 +21,15 @@ var _ = ginkgo.Describe("Certificate Rotation", func() { hubKubeconfigDir := path.Join(util.TestDir, "rotationtest", "hub-kubeconfig") agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName + // run registration agent cancel := runAgent("rotationtest", agentOptions, spokeCfg) defer cancel() diff --git a/test/integration/registration/disaster_recovery_test.go b/test/integration/registration/disaster_recovery_test.go index 413febb74..2b2e6f92e 100644 --- a/test/integration/registration/disaster_recovery_test.go +++ b/test/integration/registration/disaster_recovery_test.go @@ -3,6 +3,7 @@ package registration_test import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "os" "path" @@ -91,12 +92,13 @@ var _ = ginkgo.Describe("Disaster Recovery", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName return runAgent("addontest", agentOptions, spokeCfg) } diff --git a/test/integration/registration/managedcluster_lease_test.go b/test/integration/registration/managedcluster_lease_test.go index 44cc2404f..35277427d 100644 --- a/test/integration/registration/managedcluster_lease_test.go +++ b/test/integration/registration/managedcluster_lease_test.go @@ -3,6 +3,7 @@ package registration_test import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "time" @@ -32,12 +33,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() { ginkgo.It("managed cluster lease should be updated constantly", func() { // run registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName cancel := runAgent("cluster-leasetest", agentOptions, spokeCfg) defer cancel() @@ -50,12 +52,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() { ginkgo.It("managed cluster available 
condition should be recovered after its lease update is recovered", func() { // run registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName stop := runAgent("cluster-availabletest", agentOptions, spokeCfg) bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, util.TestLeaseDurationSeconds) @@ -69,12 +72,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() { assertAvailableCondition(managedClusterName, metav1.ConditionUnknown, gracePeriod) agentOptions = spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName stop = runAgent("cluster-availabletest", agentOptions, spokeCfg) defer stop() @@ -86,12 +90,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() { ginkgo.It("managed cluster available condition should be recovered after the cluster is restored", func() { // run registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName cancel := runAgent("cluster-leasetest", agentOptions, spokeCfg) defer cancel() @@ -136,12 +141,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() { ginkgo.It("should use a short lease duration", func() { // run registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName stop := runAgent("cluster-leasetest", agentOptions, spokeCfg) bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, 60) diff --git a/test/integration/registration/spokeagent_recovery_test.go b/test/integration/registration/spokeagent_recovery_test.go index d71ee4769..85c20d7a6 100644 --- a/test/integration/registration/spokeagent_recovery_test.go +++ b/test/integration/registration/spokeagent_recovery_test.go @@ -2,6 +2,7 @@ package registration_test import ( "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "reflect" @@ -34,12 +35,13 @@ var _ = ginkgo.Describe("Agent Recovery", func() { // run registration agent with an invalid bootstrap kubeconfig agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName cancel := runAgent("bootstrap-recoverytest", 
agentOptions, spokeCfg) defer cancel() @@ -121,12 +123,13 @@ var _ = ginkgo.Describe("Agent Recovery", func() { // run registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: spokeClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = spokeClusterName cancel := runAgent("hubkubeconfig-recoverytest", agentOptions, spokeCfg) defer cancel() diff --git a/test/integration/registration/spokeagent_restart_test.go b/test/integration/registration/spokeagent_restart_test.go index a452249d6..db7cdd718 100644 --- a/test/integration/registration/spokeagent_restart_test.go +++ b/test/integration/registration/spokeagent_restart_test.go @@ -3,6 +3,7 @@ package registration_test import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "time" @@ -33,12 +34,13 @@ var _ = ginkgo.Describe("Agent Restart", func() { ginkgo.By("run registration agent") agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName stopAgent := runAgent("restart-test", agentOptions, spokeCfg) @@ -109,12 +111,13 @@ var _ = ginkgo.Describe("Agent Restart", func() { ginkgo.By("Restart registration agent") agentOptions = spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName stopAgent = runAgent("restart-test", agentOptions, spokeCfg) defer stopAgent() @@ -161,13 +164,13 @@ var _ = ginkgo.Describe("Agent Restart", func() { ginkgo.By("run registration agent") agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } - + agentOptions.AgentOptions.SpokeClusterName = managedClusterName stopAgent := runAgent("restart-test", agentOptions, spokeCfg) ginkgo.By("Check existence of csr and ManagedCluster") @@ -223,12 +226,13 @@ var _ = ginkgo.Describe("Agent Restart", func() { ginkgo.By("Restart registration agent with a new cluster name") managedClusterName = "restart-test-cluster3" agentOptions = spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName stopAgent = runAgent("restart-test", agentOptions, spokeCfg) defer stopAgent() diff --git a/test/integration/registration/spokecluster_autoapproval_test.go b/test/integration/registration/spokecluster_autoapproval_test.go index f4334b681..f94614c74 100644 --- 
a/test/integration/registration/spokecluster_autoapproval_test.go +++ b/test/integration/registration/spokecluster_autoapproval_test.go @@ -2,6 +2,7 @@ package registration_test import ( "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "time" @@ -27,12 +28,13 @@ var _ = ginkgo.Describe("Cluster Auto Approval", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName // run registration agent cancel := runAgent("autoapprovaltest", agentOptions, spokeCfg) diff --git a/test/integration/registration/spokecluster_claim_test.go b/test/integration/registration/spokecluster_claim_test.go index 8cd7fdf8a..4d67401e0 100644 --- a/test/integration/registration/spokecluster_claim_test.go +++ b/test/integration/registration/spokecluster_claim_test.go @@ -3,6 +3,7 @@ package registration_test import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "reflect" @@ -48,13 +49,14 @@ var _ = ginkgo.Describe("Cluster Claim", func() { // run registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, MaxCustomClusterClaims: maxCustomClusterClaims, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName cancel = runAgent("claimtest", agentOptions, spokeCfg) }) diff --git a/test/integration/registration/spokecluster_joining_test.go b/test/integration/registration/spokecluster_joining_test.go index b70077e11..295d609b6 100644 --- a/test/integration/registration/spokecluster_joining_test.go +++ b/test/integration/registration/spokecluster_joining_test.go @@ -2,6 +2,7 @@ package registration_test import ( "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "time" @@ -24,12 +25,13 @@ var _ = ginkgo.Describe("Joining Process", func() { // run registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName cancel := runAgent("joiningtest", agentOptions, spokeCfg) defer cancel() diff --git a/test/integration/registration/spokecluster_status_test.go b/test/integration/registration/spokecluster_status_test.go index d0ca4b0e8..43afe5dd7 100644 --- a/test/integration/registration/spokecluster_status_test.go +++ b/test/integration/registration/spokecluster_status_test.go @@ -2,6 +2,7 @@ package registration_test import ( "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "path" "time" @@ -30,12 +31,13 @@ var _ = ginkgo.Describe("Collecting Node Resource", func() { // run 
registration agent agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName cancel := runAgent("resorucetest", agentOptions, spokeCfg) defer cancel() diff --git a/test/integration/registration/taint_add_test.go b/test/integration/registration/taint_add_test.go index 7c6683f6e..c617f9834 100644 --- a/test/integration/registration/taint_add_test.go +++ b/test/integration/registration/taint_add_test.go @@ -10,6 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" v1 "open-cluster-management.io/api/cluster/v1" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/registration/helpers" "open-cluster-management.io/ocm/pkg/registration/hub/taint" "open-cluster-management.io/ocm/pkg/registration/spoke" @@ -34,12 +35,13 @@ var _ = ginkgo.Describe("ManagedCluster Taints Update", func() { // run registration agent go func() { agentOptions := spoke.SpokeAgentOptions{ - ClusterName: managedClusterName, + AgentOptions: commonoptions.NewAgentOptions(), BootstrapKubeconfig: bootstrapKubeConfigFile, HubKubeconfigSecret: hubKubeconfigSecret, HubKubeconfigDir: hubKubeconfigDir, ClusterHealthCheckPeriod: 1 * time.Minute, } + agentOptions.AgentOptions.SpokeClusterName = managedClusterName err := agentOptions.RunSpokeAgent(ctx, &controllercmd.ControllerContext{ KubeConfig: spokeCfg, EventRecorder: util.NewIntegrationTestEventRecorder("cluster-tainttest"), diff --git a/test/integration/work/deleteoption_test.go b/test/integration/work/deleteoption_test.go index f0e819208..aee4bd533 100644 --- a/test/integration/work/deleteoption_test.go +++ b/test/integration/work/deleteoption_test.go @@ -3,6 +3,7 @@ package work import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/test/integration/util" "time" @@ -29,11 +30,12 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.HubKubeconfigFile = hubKubeconfigFileName - o.SpokeClusterName = utilrand.String(5) + o.AgentOptions = commonoptions.NewAgentOptions() + o.AgentOptions.SpokeClusterName = utilrand.String(5) o.StatusSyncInterval = 3 * time.Second ns := &corev1.Namespace{} - ns.Name = o.SpokeClusterName + ns.Name = o.AgentOptions.SpokeClusterName _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -46,7 +48,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(o.SpokeClusterName, "", manifests) + work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -54,7 +56,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { if cancel != nil { cancel() } - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{}) + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -63,15 +65,15 @@ 
var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { var anotherAppliedManifestWorkName string ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), } // Create another manifestworks with one shared resource. - anotherWork = util.NewManifestWork(o.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]}) + anotherWork = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]}) }) ginkgo.JustBeforeEach(func() { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) @@ -81,7 +83,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{}) + anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) @@ -94,7 +96,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() { // ensure configmap exists and get its uid util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) currentUID := curentConfigMap.UID @@ -130,7 +132,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Delete one manifestwork - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource 
@@ -147,7 +149,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure the configmap is kept and tracked by anotherappliedmanifestwork. gomega.Eventually(func() error { - configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -178,7 +180,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() { // ensure configmap exists and get its uid util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) currentUID := curentConfigMap.UID @@ -214,10 +216,10 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Update one manifestwork to remove the shared resource - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) work.Spec.Workload.Manifests = []workapiv1.Manifest{manifests[1]} - _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Ensure the resource is not tracked by the appliedmanifestwork. 
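The repeated `o.SpokeClusterName` -> `o.AgentOptions.SpokeClusterName` substitutions running through these test hunks all follow the same pattern: the workload agent options now embed the shared agent options from the new pkg/common/options package, and the spoke cluster name is carried there. As a rough, condensed sketch of how the integration tests wire this up after the change (taken from the BeforeEach hunks later in this patch; the exact fields of AgentOptions are defined in the new options.go, which is not shown here, so treat the shape below as illustrative):

    // Build the workload agent options for a test spoke cluster.
    // commonoptions.NewAgentOptions() supplies the shared agent settings
    // (including SpokeClusterName), which previously lived directly on
    // the workload agent options.
    o := spoke.NewWorkloadAgentOptions()
    o.HubKubeconfigFile = hubKubeconfigFileName
    o.AgentOptions = commonoptions.NewAgentOptions()
    o.AgentOptions.SpokeClusterName = utilrand.String(5)
    o.StatusSyncInterval = 3 * time.Second

    // Hub and spoke client calls are then scoped to the common cluster name,
    // e.g. creating a ManifestWork in the spoke cluster's namespace:
    work := util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
    work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).
        Create(context.Background(), work, metav1.CreateOptions{})

Everything else in the hunks below is a mechanical application of that rename to client calls, namespaces, and RBAC objects.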
@@ -238,7 +240,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure the configmap is kept and tracked by anotherappliedmanifestwork gomega.Eventually(func() error { - configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -271,8 +273,8 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.Context("Delete options", func() { ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), } }) @@ -281,7 +283,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) @@ -296,7 +298,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -309,7 +311,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) if err != nil { return err } @@ -322,12 +324,12 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Delete the work - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + _, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) return errors.IsNotFound(err) }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) @@ -343,14 
+345,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { { Group: "", Resource: "configmaps", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "cm1", }, }, }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) @@ -365,7 +367,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -378,21 +380,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Delete the work - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + _, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) return errors.IsNotFound(err) }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // One of the resource should be deleted. 
- _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) // One of the resource should be kept - _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -404,14 +406,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { { Group: "", Resource: "configmaps", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "cm1", }, }, }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) @@ -426,7 +428,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -440,15 +442,15 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Remove the resource from the manifests gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } work.Spec.Workload.Manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), } - _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -459,7 +461,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Sleep 5 second and check the resource should be kept time.Sleep(5 * time.Second) - _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -471,14 +473,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { { Group: "", Resource: "configmaps", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "cm1", }, }, }, } - work, err = 
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) @@ -493,7 +495,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -507,19 +509,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Remove the delete option gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } work.Spec.DeleteOption = nil - _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -532,19 +534,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Delete the work - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + _, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) return errors.IsNotFound(err) }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // All of the resource should be deleted. 
- _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) }) diff --git a/test/integration/work/executor_test.go b/test/integration/work/executor_test.go index 89b515379..ed10d2a2b 100644 --- a/test/integration/work/executor_test.go +++ b/test/integration/work/executor_test.go @@ -3,6 +3,7 @@ package work import ( "context" "encoding/json" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/test/integration/util" "time" @@ -33,13 +34,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.HubKubeconfigFile = hubKubeconfigFileName - o.SpokeClusterName = utilrand.String(5) + o.AgentOptions = commonoptions.NewAgentOptions() + o.AgentOptions.SpokeClusterName = utilrand.String(5) o.StatusSyncInterval = 3 * time.Second err := features.DefaultSpokeWorkMutableFeatureGate.Set("ExecutorValidatingCaches=true") gomega.Expect(err).NotTo(gomega.HaveOccurred()) ns := &corev1.Namespace{} - ns.Name = o.SpokeClusterName + ns.Name = o.AgentOptions.SpokeClusterName _, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -53,7 +55,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(o.SpokeClusterName, "", manifests) + work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests) gomega.Expect(err).ToNot(gomega.HaveOccurred()) work.Spec.Executor = executor }) @@ -63,7 +65,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { cancel() } err := spokeKubeClient.CoreV1().Namespaces().Delete( - context.Background(), o.SpokeClusterName, metav1.DeleteOptions{}) + context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -71,14 +73,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { executorName := "test-executor" ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), } executor = &workapiv1.ManifestWorkExecutor{ Subject: workapiv1.ManifestWorkExecutorSubject{ Type: workapiv1.ExecutorSubjectTypeServiceAccount, ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -86,7 +88,7 @@ var _ = ginkgo.Describe("ManifestWork Executor 
Subject", func() { }) ginkgo.It("Executor does not have permission", func() { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -103,10 +105,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.It("Executor does not have permission to partial resources", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Rules: []rbacv1.PolicyRule{ @@ -119,16 +121,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -140,7 +142,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -154,20 +156,20 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmap cm1 exist and cm2 not exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("Executor has permission for all resources", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Rules: []rbacv1.PolicyRule{ @@ -180,16 +182,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( 
context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -201,7 +203,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -221,14 +223,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { executorName := "test-executor" ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), } executor = &workapiv1.ManifestWorkExecutor{ Subject: workapiv1.ManifestWorkExecutorSubject{ Type: workapiv1.ExecutorSubjectTypeServiceAccount, ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -237,10 +239,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.It("Executor does not have delete permission and delete option is foreground", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Rules: []rbacv1.PolicyRule{ @@ -253,16 +255,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -274,7 +276,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -291,10 +293,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.It("Executor does not have delete permission and delete option is orphan", func() { roleName := "role1" - _, err = 
spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Rules: []rbacv1.PolicyRule{ @@ -307,16 +309,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -331,7 +333,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { work.Spec.DeleteOption = &workapiv1.DeleteOption{ PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -348,10 +350,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.It("Executor does not have delete permission and delete option is selectively orphan", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Rules: []rbacv1.PolicyRule{ @@ -364,16 +366,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: roleName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -391,13 +393,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { OrphaningRules: []workapiv1.OrphaningRule{ { Resource: "configmaps", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "cm1", }, }, }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -411,11 +413,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmap cm1 exist and cm2 not exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + 
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) @@ -424,20 +426,20 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { executorName := "test-executor" ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewRoleForManifest(o.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{ + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewRoleForManifest(o.AgentOptions.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{ Verbs: []string{"create", "update", "patch", "get", "list", "delete"}, APIGroups: []string{""}, Resources: []string{"configmaps"}, })), - util.ToManifest(util.NewRoleBindingForManifest(o.SpokeClusterName, "role-cm-creator-binding", + util.ToManifest(util.NewRoleBindingForManifest(o.AgentOptions.SpokeClusterName, "role-cm-creator-binding", rbacv1.RoleRef{ Kind: "Role", Name: "role-cm-creator", }, rbacv1.Subject{ Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, })), } @@ -445,7 +447,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { Subject: workapiv1.ManifestWorkExecutorSubject{ Type: workapiv1.ExecutorSubjectTypeServiceAccount, ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -454,11 +456,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.It("no permission", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Rules: []rbacv1.PolicyRule{ { @@ -470,16 +472,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -491,7 +493,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, 
metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -507,17 +509,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmap not exist util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("no permission for already existing resource", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Rules: []rbacv1.PolicyRule{ { @@ -529,16 +531,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -551,11 +553,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) // make the role exist with lower permission - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: "role-cm-creator", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Rules: []rbacv1.PolicyRule{ { @@ -567,7 +569,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -584,17 +586,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmap not exist util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("with permission", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Rules: []rbacv1.PolicyRule{ { @@ -611,16 +613,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, 
metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -632,7 +634,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -648,17 +650,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmaps exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("with permission for already exist resource", func() { roleName := "role1" - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Rules: []rbacv1.PolicyRule{ { @@ -675,16 +677,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleName, - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -697,11 +699,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) // make the role exist with lower permission - _, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create( + _, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: "role-cm-creator", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, }, Rules: []rbacv1.PolicyRule{ { @@ -713,7 +715,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -729,7 +731,7 @@ var _ = 
ginkgo.Describe("ManifestWork Executor Subject", func() { // ensure configmaps exist util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) @@ -785,13 +787,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { } ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), } executor = &workapiv1.ManifestWorkExecutor{ Subject: workapiv1.ManifestWorkExecutorSubject{ Type: workapiv1.ExecutorSubjectTypeServiceAccount, ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{ - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: executorName, }, }, @@ -799,7 +801,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { }) ginkgo.It("Permission change", func() { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create( + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -813,8 +815,8 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.By("ensure configmaps do not exist") util.AssertNonexistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - createRBAC(o.SpokeClusterName, executorName) - addConfigMapToManifestWork(hubWorkClient, work.Name, o.SpokeClusterName, "cm2") + createRBAC(o.AgentOptions.SpokeClusterName, executorName) + addConfigMapToManifestWork(hubWorkClient, work.Name, o.AgentOptions.SpokeClusterName, "cm2") util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) @@ -825,8 +827,8 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.By("ensure configmaps cm1 and cm2 exist") util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - deleteRBAC(o.SpokeClusterName, executorName) - addConfigMapToManifestWork(hubWorkClient, work.Name, o.SpokeClusterName, "cm3") + deleteRBAC(o.AgentOptions.SpokeClusterName, executorName) + addConfigMapToManifestWork(hubWorkClient, work.Name, o.AgentOptions.SpokeClusterName, "cm3") util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse, metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval) @@ -837,15 +839,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.By("ensure configmap cm1 cm2 exist(will not delete the applied resource even the permison is revoked) but cm3 does not exist") util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), }, spokeKubeClient, 
eventuallyTimeout, eventuallyInterval) util.AssertExistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, nil)), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) util.AssertNonexistenceOfConfigMaps( []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"a": "b"}, nil)), }, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) diff --git a/test/integration/work/statusfeedback_test.go b/test/integration/work/statusfeedback_test.go index 36299a3e6..76bc75893 100644 --- a/test/integration/work/statusfeedback_test.go +++ b/test/integration/work/statusfeedback_test.go @@ -3,6 +3,7 @@ package work import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/test/integration/util" "time" @@ -32,11 +33,12 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.HubKubeconfigFile = hubKubeconfigFileName - o.SpokeClusterName = utilrand.String(5) + o.AgentOptions = commonoptions.NewAgentOptions() + o.AgentOptions.SpokeClusterName = utilrand.String(5) o.StatusSyncInterval = 3 * time.Second ns := &corev1.Namespace{} - ns.Name = o.SpokeClusterName + ns.Name = o.AgentOptions.SpokeClusterName _, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -45,18 +47,18 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(o.SpokeClusterName, "", manifests) + work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.AfterEach(func() { - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{}) + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.Context("Deployment Status feedback", func() { ginkgo.BeforeEach(func() { - u, _, err := util.NewDeployment(o.SpokeClusterName, "deploy1", "sa") + u, _, err := util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa") gomega.Expect(err).ToNot(gomega.HaveOccurred()) manifests = append(manifests, util.ToManifest(u)) @@ -77,7 +79,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ResourceIdentifier: workapiv1.ResourceIdentifier{ Group: "apps", Resource: "deployments", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "deploy1", }, FeedbackRules: []workapiv1.FeedbackRule{ @@ -88,7 +90,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, 
hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, @@ -98,7 +100,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Update Deployment status on spoke gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) if err != nil { return err } @@ -107,13 +109,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { deploy.Status.Replicas = 3 deploy.Status.ReadyReplicas = 2 - _, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + _, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Check if we get status of deployment on work api gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } @@ -160,7 +162,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Update replica of deployment gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) if err != nil { return err } @@ -169,13 +171,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { deploy.Status.Replicas = 3 deploy.Status.ReadyReplicas = 3 - _, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + _, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Check if the status of deployment is synced on work api gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } @@ -227,7 +229,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ResourceIdentifier: workapiv1.ResourceIdentifier{ Group: "apps", Resource: "deployments", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "deploy1", }, FeedbackRules: []workapiv1.FeedbackRule{ @@ -248,7 +250,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, 
work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, @@ -257,7 +259,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) if err != nil { return err } @@ -269,13 +271,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, } - _, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + _, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Check if we get status of deployment on work api gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } @@ -308,7 +310,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }) ginkgo.It("should return none for resources with no wellKnowne status", func() { - sa, _ := util.NewServiceAccount(o.SpokeClusterName, "sa") + sa, _ := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "sa") work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, util.ToManifest(sa)) work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ @@ -316,7 +318,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ResourceIdentifier: workapiv1.ResourceIdentifier{ Group: "apps", Resource: "deployments", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "deploy1", }, FeedbackRules: []workapiv1.FeedbackRule{ @@ -329,7 +331,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ResourceIdentifier: workapiv1.ResourceIdentifier{ Group: "", Resource: "serviceaccounts", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "sa", }, FeedbackRules: []workapiv1.FeedbackRule{ @@ -340,7 +342,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, @@ -350,7 +352,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { // Update Deployment status on spoke gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) if err != nil { return err } @@ -359,13 +361,13 @@ var _ = 
ginkgo.Describe("ManifestWork Status Feedback", func() { deploy.Status.Replicas = 3 deploy.Status.ReadyReplicas = 2 - _, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + _, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Check if we get status of deployment on work api gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } @@ -421,7 +423,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ResourceIdentifier: workapiv1.ResourceIdentifier{ Group: "apps", Resource: "deployments", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "deploy1", }, FeedbackRules: []workapiv1.FeedbackRule{ @@ -438,7 +440,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, @@ -450,7 +452,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.Context("Deployment Status feedback with RawJsonString enabled", func() { ginkgo.BeforeEach(func() { - u, _, err := util.NewDeployment(o.SpokeClusterName, "deploy1", "sa") + u, _, err := util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa") gomega.Expect(err).ToNot(gomega.HaveOccurred()) manifests = append(manifests, util.ToManifest(u)) @@ -473,7 +475,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ResourceIdentifier: workapiv1.ResourceIdentifier{ Group: "apps", Resource: "deployments", - Namespace: o.SpokeClusterName, + Namespace: o.AgentOptions.SpokeClusterName, Name: "deploy1", }, FeedbackRules: []workapiv1.FeedbackRule{ @@ -490,7 +492,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, } - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, @@ -499,7 +501,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) if err != nil { return err } @@ -511,13 +513,13 @@ var _ = 
ginkgo.Describe("ManifestWork Status Feedback", func() { }, } - _, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + _, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Check if we get status of deployment on work api gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/integration/work/unmanaged_appliedwork_test.go b/test/integration/work/unmanaged_appliedwork_test.go index 96c28cd35..623284806 100644 --- a/test/integration/work/unmanaged_appliedwork_test.go +++ b/test/integration/work/unmanaged_appliedwork_test.go @@ -3,6 +3,7 @@ package work import ( "context" "fmt" + commonoptions "open-cluster-management.io/ocm/pkg/common/options" util "open-cluster-management.io/ocm/test/integration/util" "os" "path" @@ -33,13 +34,14 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.HubKubeconfigFile = hubKubeconfigFileName - o.SpokeClusterName = utilrand.String(5) + o.AgentOptions = commonoptions.NewAgentOptions() + o.AgentOptions.SpokeClusterName = utilrand.String(5) o.StatusSyncInterval = 3 * time.Second o.AgentID = utilrand.String(5) o.AppliedManifestWorkEvictionGracePeriod = 10 * time.Second ns = &corev1.Namespace{} - ns.Name = o.SpokeClusterName + ns.Name = o.AgentOptions.SpokeClusterName _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -48,11 +50,11 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { go startWorkAgent(ctx, o) manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)), } - work = util.NewManifestWork(o.SpokeClusterName, "unmanaged-appliedwork", manifests) - _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "unmanaged-appliedwork", manifests) + _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) @@ -62,7 +64,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { if cancel != nil { cancel() } - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{}) + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -126,7 +128,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { newOption := spoke.NewWorkloadAgentOptions() newOption.HubKubeconfigFile = newHubKubeConfigFile - newOption.SpokeClusterName 
= o.SpokeClusterName + newOption.AgentOptions.SpokeClusterName = o.AgentOptions.SpokeClusterName newOption.AgentID = utilrand.String(5) newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second @@ -135,7 +137,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { go startWorkAgent(ctx, newOption) // Create the same manifestwork with the same name on new hub. - work, err = newWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = newWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, @@ -145,7 +147,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { // ensure the resource has two ownerrefs gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{}) if err != nil { return err } @@ -171,7 +173,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { newOption := spoke.NewWorkloadAgentOptions() newOption.HubKubeconfigFile = newHubKubeConfigFile - newOption.SpokeClusterName = o.SpokeClusterName + newOption.AgentOptions.SpokeClusterName = o.AgentOptions.SpokeClusterName newOption.AgentID = o.AgentID newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second @@ -180,7 +182,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { go startWorkAgent(ctx, newOption) // Create the same manifestwork with the same name. 
- work, err = newWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ work, err = newWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -202,7 +204,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
// ensure the resource has only one ownerref
gomega.Eventually(func() error {
- cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
+ cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -261,7 +263,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
go startWorkAgent(ctx, o)
// recreate the work on the hub
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// ensure the appliemanifestwork eviction is stopped
diff --git a/test/integration/work/updatestrategy_test.go b/test/integration/work/updatestrategy_test.go
index bfeac9b9c..c2c2ffecc 100644
--- a/test/integration/work/updatestrategy_test.go
+++ b/test/integration/work/updatestrategy_test.go
@@ -3,6 +3,7 @@ package work
import (
"context"
"fmt"
+ commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"time"
@@ -30,11 +31,12 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
- o.SpokeClusterName = utilrand.String(5)
+ o.AgentOptions = commonoptions.NewAgentOptions()
+ o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second
ns := &corev1.Namespace{}
- ns.Name = o.SpokeClusterName
+ ns.Name = o.AgentOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -47,14 +49,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
})
ginkgo.JustBeforeEach(func() {
- work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
+ work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
- err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
+ err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
@@ -62,7 +64,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
- object, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
+ object, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
@@ -73,7 +75,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
- Namespace: o.SpokeClusterName,
+ Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -82,7 +84,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -92,13 +94,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -106,7 +108,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
- deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
+ deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -124,7 +126,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
- object, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
+ object, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
@@ -135,7 +137,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
- Namespace: o.SpokeClusterName,
+ Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -144,7 +146,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -153,18 +155,18 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
- deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
+ deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -183,7 +185,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
- Namespace: o.SpokeClusterName,
+ Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -192,7 +194,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -203,7 +205,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
patch, err := object.MarshalJSON()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
- _, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Patch(
+ _, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Patch(
context.Background(), "deploy1", types.ApplyPatchType, []byte(patch), metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: "test-integration"})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -211,13 +213,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -228,13 +230,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// remove the replica field and the apply should work
unstructured.RemoveNestedField(object.Object, "spec", "replicas")
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -248,7 +250,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
- Namespace: o.SpokeClusterName,
+ Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -257,7 +259,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -267,13 +269,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
objCopy := object.DeepCopy()
// work1 does not want to own replica field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
- work1 := util.NewManifestWork(o.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
+ work1 := util.NewManifestWork(o.AgentOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
- Namespace: o.SpokeClusterName,
+ Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -286,7 +288,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -296,13 +298,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -311,7 +313,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
- deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
+ deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -327,13 +329,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -348,7 +350,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
- Namespace: o.SpokeClusterName,
+ Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -357,7 +359,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -367,13 +369,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
objCopy := object.DeepCopy()
// work1 does not want to own replica field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
- work1 := util.NewManifestWork(o.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
+ work1 := util.NewManifestWork(o.AgentOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
- Namespace: o.SpokeClusterName,
+ Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -386,14 +388,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
- deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
+ deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -407,18 +409,18 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// update deleteOption of the first work
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}
- _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ _, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
- deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
+ deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
diff --git a/test/integration/work/work_test.go b/test/integration/work/work_test.go
index 94659414c..19d2fb7e9 100644
--- a/test/integration/work/work_test.go
+++ b/test/integration/work/work_test.go
@@ -3,6 +3,7 @@ package work
import (
"context"
"fmt"
+ commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"time"
@@ -44,12 +45,13 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
- o.SpokeClusterName = utilrand.String(5)
+ o.AgentOptions = commonoptions.NewAgentOptions()
+ o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
ns := &corev1.Namespace{}
- ns.Name = o.SpokeClusterName
+ ns.Name = o.AgentOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -62,30 +64,30 @@ var _ = ginkgo.Describe("ManifestWork", func() {
})
ginkgo.JustBeforeEach(func() {
- work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+ work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.AfterEach(func() {
- err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
+ err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
if !errors.IsNotFound(err) {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
gomega.Eventually(func() error {
- _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ _, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
- return fmt.Errorf("work %s in namespace %s still exists", work.Name, o.SpokeClusterName)
+ return fmt.Errorf("work %s in namespace %s still exists", work.Name, o.AgentOptions.SpokeClusterName)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
- err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
+ err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
if cancel != nil {
@@ -96,7 +98,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.Context("With a single manifest", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
}
})
@@ -116,13 +118,13 @@ var _ = ginkgo.Describe("ManifestWork", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
}
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -144,14 +146,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
- _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
+ _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)
- err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
+ err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -162,8 +164,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap("non-existent-namespace", "cm1", map[string]string{"a": "b"}, nil)),
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, nil)),
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, nil)),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)),
}
})
@@ -185,15 +187,15 @@ var _ = ginkgo.Describe("ManifestWork", func() {
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)),
}
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -218,14 +220,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
- _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
+ _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)
- err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
+ err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -249,7 +251,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
objects = append(objects, obj)
// cr
- obj, gvr, err = util.GuestbookCr(o.SpokeClusterName, "guestbook1")
+ obj, gvr, err = util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gvrs = append(gvrs, gvr)
objects = append(objects, obj)
@@ -291,7 +293,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)
// update object label
- obj, gvr, err := util.GuestbookCr(o.SpokeClusterName, "guestbook1")
+ obj, gvr, err := util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
@@ -341,7 +343,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)
// update object finalizer
- obj, gvr, err := util.GuestbookCr(o.SpokeClusterName, "guestbook1")
+ obj, gvr, err := util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
@@ -405,12 +407,12 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)
// delete manifest work
- err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
+ err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// wait for deletion of manifest work
gomega.Eventually(func() bool {
- _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ _, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
@@ -444,19 +446,19 @@ var _ = ginkgo.Describe("ManifestWork", func() {
gvrs = nil
objects = nil
- u, gvr := util.NewServiceAccount(o.SpokeClusterName, "sa")
+ u, gvr := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "sa")
gvrs = append(gvrs, gvr)
objects = append(objects, u)
- u, gvr = util.NewRole(o.SpokeClusterName, "role1")
+ u, gvr = util.NewRole(o.AgentOptions.SpokeClusterName, "role1")
gvrs = append(gvrs, gvr)
objects = append(objects, u)
- u, gvr = util.NewRoleBinding(o.SpokeClusterName, "rolebinding1", "sa", "role1")
+ u, gvr = util.NewRoleBinding(o.AgentOptions.SpokeClusterName, "rolebinding1", "sa", "role1")
gvrs = append(gvrs, gvr)
objects = append(objects, u)
- u, gvr, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
+ u, gvr, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gvrs = append(gvrs, gvr)
objects = append(objects, u)
@@ -511,9 +513,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.By("update manifests in work")
oldServiceAccount := objects[0]
gvrs[0], gvrs[3] = gvrs[3], gvrs[0]
- u, _ := util.NewServiceAccount(o.SpokeClusterName, "admin")
+ u, _ := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "admin")
objects[3] = u
- u, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "admin")
+ u, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "admin")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
objects[0] = u
@@ -527,10 +529,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
updateTime := metav1.Now()
time.Sleep(1 * time.Second)
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
ginkgo.By("check existence of all maintained resources")
@@ -544,7 +546,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.By("check if deployment is updated")
gomega.Eventually(func() error {
- u, err := util.GetResource(o.SpokeClusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
+ u, err := util.GetResource(o.AgentOptions.SpokeClusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
if err != nil {
return err
}
@@ -558,7 +560,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.By("check if LastTransitionTime is updated")
gomega.Eventually(func() error {
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -602,14 +604,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
var finalizer = "cluster.open-cluster-management.io/testing"
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{finalizer})),
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{finalizer})),
- util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{finalizer})),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{finalizer})),
+ util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
}
})
ginkgo.AfterEach(func() {
- err = util.RemoveConfigmapFinalizers(spokeKubeClient, o.SpokeClusterName, "cm1", "cm2", "cm3")
+ err = util.RemoveConfigmapFinalizers(spokeKubeClient, o.AgentOptions.SpokeClusterName, "cm1", "cm2", "cm3")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
@@ -621,10 +623,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = manifests[1:]
- work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+ work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -636,7 +638,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
go func() {
time.Sleep(2 * time.Second)
// remove finalizers of cm1
- _ = util.RemoveConfigmapFinalizers(spokeKubeClient, o.SpokeClusterName, "cm1")
+ _ = util.RemoveConfigmapFinalizers(spokeKubeClient, o.AgentOptions.SpokeClusterName, "cm1")
}()
// check if resource created by stale manifest is deleted once it is removed from applied resource list
@@ -655,7 +657,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
- _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
+ _, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})