
Commit e19e4db: Update
ukff committed Dec 13, 2024
1 parent bf89e52 commit e19e4db
Showing 5 changed files with 59 additions and 52 deletions.
15 changes: 8 additions & 7 deletions Makefile
@@ -15,9 +15,9 @@ MANIFEST_PATH=./manifests/btp-operator
MANIFEST_FILE=btp-manager.yaml

# Image URL to use all building/pushing image targets
IMG_REGISTRY_PORT ?= 5001
IMG_REGISTRY ?= k3d-kyma-registry:$(IMG_REGISTRY_PORT)
IMG ?= $(IMG_REGISTRY)/btp-manager:$(MODULE_VERSION)
IMG_REGISTRY_PORT ?= 5000
IMG_REGISTRY ?= ukff/btpmgr
IMG ?= $(IMG_REGISTRY):latest

COMPONENT_CLI_VERSION ?= latest

@@ -89,14 +89,11 @@ run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go

.PHONY: docker-build
docker-build: test ## Build docker image with the manager.
docker-build: ## Build docker image with the manager.
IMG=$(IMG) docker build -t ${IMG} .

.PHONY: docker-push
docker-push: ## Push docker image with the manager.
ifneq (,$(GCR_DOCKER_PASSWORD))
docker login $(IMG_REGISTRY) -u oauth2accesstoken --password $(GCR_DOCKER_PASSWORD)
endif
docker push ${IMG}

##@ Deployment
@@ -126,6 +123,10 @@ deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -

.PHONY:
debug: module-image
echo "debugging"

.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
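
With the image defaults switched to ukff/btpmgr and both the test prerequisite and the GCR login step removed, building and publishing reduces to the two existing targets. A minimal sketch, assuming you are already logged in to Docker Hub for the ukff namespace:

# IMG now defaults to ukff/btpmgr:latest
make docker-build
make docker-push

# or override the image reference for a single run
make docker-build docker-push IMG=ukff/btpmgr:dev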
4 changes: 2 additions & 2 deletions config/manager/kustomization.yaml
@@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: europe-docker.pkg.dev/kyma-project/prod/btp-manager
newTag: 0.0.23-test
newName: ukff/btpmgr
newTag: latest
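
This images override is the same entry the Makefile's deploy target rewrites before applying the manifests; the equivalent manual steps (a sketch, assuming kustomize and kubectl are on PATH) are:

(cd config/manager && kustomize edit set image controller=ukff/btpmgr:latest)
kustomize build config/default | kubectl apply -f -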
83 changes: 42 additions & 41 deletions controllers/btpoperator_controller_secret_test.go
@@ -2,70 +2,66 @@ package controllers

import (
"fmt"
appsv1 "k8s.io/api/apps/v1"

[GitHub Actions / run-go-linter] Check failure on line 5 in controllers/btpoperator_controller_secret_test.go: File is not `goimports`-ed (goimports)
"strings"

"github.com/kyma-project/btp-manager/api/v1alpha1"
"github.com/kyma-project/btp-manager/internal/conditions"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

var _ = Describe("BTP Operator controller - sap btp manager secret changes", Label("secret"), func() {
When("sap btp secret is updated with new clust id", func() {

When("change cluster id in sap btp secret", func() {
It("should restart and update secret", func() {
var match bool
cr := &v1alpha1.BtpOperator{}
sapBtpSecret := &corev1.Secret{}
clusterSecret := &corev1.Secret{}
configMap := &corev1.ConfigMap{}
var cr *v1alpha1.BtpOperator

sapBtpSecret, err := createCorrectSecretFromYaml()

Expect(err).To(BeNil())
Eventually(func() error {
return k8sClient.Patch(ctx, sapBtpSecret, client.Apply, client.ForceOwnership, client.FieldOwner(operatorName))
return k8sClient.Create(ctx, sapBtpSecret)
}).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())

cr = createDefaultBtpOperator()
Expect(k8sClient.Create(ctx, cr)).To(Succeed())
Eventually(updateCh).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Receive(matchReadyCondition(v1alpha1.StateReady, metav1.ConditionTrue, conditions.ReconcileSucceeded)))
Eventually(updateCh).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Receive(matchReadyCondition(v1alpha1.StateProcessing, metav1.ConditionFalse, conditions.Initialized)))

btpServiceOperatorDeployment := &appsv1.Deployment{}
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: DeploymentName, Namespace: kymaNamespace}, btpServiceOperatorDeployment)
}).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())

// check before integrity
clusterSecret = generateClusterIDSecret("test_cluster_id")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: SecretName, Namespace: kymaNamespace}, sapBtpSecret)
}).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())

Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: clusterIdSecretName, Namespace: kymaNamespace}, clusterSecret)
return k8sClient.Create(ctx, clusterSecret)
}).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())

Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Namespace: kymaNamespace, Name: btpServiceOperatorConfigMap}, configMap)
}).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())

match = (string(sapBtpSecret.Data[sapBtpManagerSecretClusterIdKey]) == string(clusterSecret.Data[clusterIdSecretKey])) && (string(sapBtpSecret.Data[sapBtpManagerSecretClusterIdKey]) == configMap.Data[clusterIdKeyConfigMap])
fmt.Println("sapBtpSecret.Data[sapBtpManagerClusterIdKey]: ", string(sapBtpSecret.Data[sapBtpManagerSecretClusterIdKey]))
fmt.Println("clusterSecret.Data[initialClusterIdKey]: ", string(clusterSecret.Data[clusterIdSecretKey]))
fmt.Println("configMap.Data[clusterIdKey]: ", configMap.Data[clusterIdKeyConfigMap])

Expect(match).To(BeTrue())

// simulate update
if sapBtpSecret.Data == nil {
sapBtpSecret.Data = make(map[string][]byte)
}
sapBtpSecret.Data[sapBtpManagerSecretClusterIdKey] = []byte("new-cluster-id")
Eventually(func() error { return k8sClient.Update(ctx, sapBtpSecret) }).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())
_, err = reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: client.ObjectKey{Namespace: defaultNamespace, Name: btpOperatorName}})
if err != nil {
return
}

// simulate creation of secret created by SAP BTP Operator
clusterSecret = generateClusterIDSecret("new-cluster-id")
Eventually(func() error {
return k8sClient.Update(ctx, clusterSecret)
}).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())

Eventually(updateCh).Should(Receive(matchReadyCondition(v1alpha1.StateReady, metav1.ConditionTrue, conditions.ReconcileSucceeded)))

// check integrity after update
@@ -81,33 +77,38 @@ var _ = Describe("BTP Operator controller - sap btp manager secret changes", Lab
return k8sClient.Get(ctx, client.ObjectKey{Namespace: kymaNamespace, Name: btpServiceOperatorConfigMap}, configMap)
}).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())

match = (sapBtpSecret.StringData[clusterIdKeyConfigMap] == clusterSecret.StringData[clusterIdSecretKey]) && (sapBtpSecret.StringData[clusterIdKeyConfigMap] == configMap.Data[clusterIdKeyConfigMap])
match = match && sapBtpSecret.StringData[clusterIdKeyConfigMap] == "new-cluster-id"
fmt.Println("sapBtpSecret.Data[sapBtpManagerClusterIdKey]: ", string(sapBtpSecret.Data[sapBtpManagerSecretClusterIdKey]))
fmt.Println("clusterSecret.Data[initialClusterIdKey]: ", string(clusterSecret.Data[clusterIdSecretKey]))
fmt.Println("configMap.Data[clusterIdKey]: ", configMap.Data[clusterIdKeyConfigMap])
Expect(match).To(BeTrue())
Expect(isMatch(clusterSecret, sapBtpSecret, configMap)).To(BeTrue())

GinkgoWriter.Println("start AfterEach")

cr = &v1alpha1.BtpOperator{}
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: defaultNamespace, Name: btpOperatorName}, cr)).Should(Succeed())
Expect(k8sClient.Delete(ctx, cr)).Should(Succeed())
Eventually(updateCh).Should(Receive(matchDeleted()))
Expect(isCrNotFound()).To(BeTrue())
deleteSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: SecretName,
Namespace: kymaNamespace,
},
}
Eventually(func() error { return k8sClient.Delete(ctx, deleteSecret) }).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())
GinkgoWriter.Println("end AfterEach")
Eventually(func() error { return k8sClient.Delete(ctx, sapBtpSecret) }).WithTimeout(k8sOpsTimeout).WithPolling(k8sOpsPollingInterval).Should(Succeed())
})
})
})
})

func validateDataIntegrity() {
func isMatch(clusterId, secretName *corev1.Secret, configMap *corev1.ConfigMap) bool {
match1 := clusterId.StringData[sapBtpManagerSecretClusterIdKey] == secretName.StringData[clusterIdSecretKey]

match2 := strings.EqualFold(string(clusterId.Data[sapBtpManagerSecretClusterIdKey]), configMap.Data[clusterIdKeyConfigMap])
fmt.Printf("string(clusterId.Data[sapBtpManagerSecretClusterIdKey] %s \n", string(clusterId.Data[sapBtpManagerSecretClusterIdKey]))
fmt.Printf("configMap.Data[clusterIdKeyConfigMap] %s \n", configMap.Data[clusterIdKeyConfigMap])

match := match1 && match2 && strings.EqualFold(string(clusterId.Data[sapBtpManagerSecretClusterIdKey]), "new-cluster-id")
return match
}

func generateClusterIDSecret(key string) *corev1.Secret {
clusterSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: clusterIdSecretName,
Namespace: kymaNamespace,
},
Data: map[string][]byte{
clusterIdSecretKey: []byte(key),
},
}
return clusterSecret
}
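
The run-go-linter failure flagged above concerns only import placement (the appsv1 import was moved ahead of the grouped block). Assuming the standard Go tooling is installed, the usual local fix is:

goimports -w controllers/btpoperator_controller_secret_test.go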
4 changes: 2 additions & 2 deletions scripts/testing/set-env-vars.sh
@@ -12,7 +12,7 @@ export KUBEBUILDER_ASSETS="$(${ENVTEST} use ${ENVTEST_K8S_VERSION} --bin-dir ${L
export USE_EXISTING_CLUSTER=${USE_EXISTING_CLUSTER:="false"}

# if you plan to debug or run on existing cluster increase the timeout (30 seconds should be ok)
export SINGLE_TEST_TIMEOUT=${SINGLE_TEST_TIMEOUT:="5s"}
export SINGLE_TEST_TIMEOUT=${SINGLE_TEST_TIMEOUT:="15s"}

# if you plan to debug or run on existing cluster increase the timeout (180 seconds should be ok)
export SUITE_TIMEOUT=${SUITE_TIMEOUT:=30s}
@@ -21,7 +21,7 @@ export SUITE_TIMEOUT=${SUITE_TIMEOUT:=30s}
export GINKGO_VERBOSE_FLAG=${GINKGO_VERBOSE_FLAG:="ginkgo.succinct"}

# GINKGO_LABEL_FILTER="provisioning,test-update"
export GINKGO_LABEL_FILTER=${GINKGO_LABEL_FILTER:=""}
export GINKGO_LABEL_FILTER=${GINKGO_LABEL_FILTER:="secret"}

# should be false for env-test cluster, may be true for existing cluster
export DISABLE_WEBHOOK_FILTER_FOR_TESTS=${DISABLE_WEBHOOK_FILTER_FOR_TESTS:="false"}
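
As the comments in this script point out, the defaults are tight for debugging or for running against an existing cluster; because every variable uses the ${VAR:=default} form, each one can be overridden per invocation. A sketch, assuming the suite is started via the repository's make test target (an assumption; the exact entry point may differ):

SINGLE_TEST_TIMEOUT=30s SUITE_TIMEOUT=180s GINKGO_LABEL_FILTER=secret make test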
5 changes: 5 additions & 0 deletions test.sh
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
$(make debug)
$(kubectl rollout restart deployment btp-manager-controller-manager -n kyma-system)
$(clear)
$(kubectl logs -f deployment/btp-manager-controller-manager -n kyma-system)
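
Note that wrapping each command in $( ... ) makes the shell run the command and then try to execute its output as another command. A plainer version of the same helper, shown only as an illustrative sketch rather than what the commit contains:

#!/usr/bin/env bash
set -euo pipefail

make debug
kubectl rollout restart deployment btp-manager-controller-manager -n kyma-system
clear
kubectl logs -f deployment/btp-manager-controller-manager -n kyma-system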
