diff --git a/Dockerfile b/Dockerfile index de14b4db..86936536 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,7 +24,7 @@ RUN make go-build # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot +FROM registry.access.redhat.com/ubi9/ubi-minimal WORKDIR / COPY --from=builder /workspace/bin/manager . COPY --from=builder /workspace/bin/status-reporter . diff --git a/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml b/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml index 39deab88..77cf24ad 100644 --- a/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml +++ b/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml @@ -7,7 +7,7 @@ metadata: categories: Storage console.openshift.io/plugins: '["odf-client-console"]' containerImage: quay.io/ocs-dev/ocs-client-operator:latest - createdAt: "2024-10-07T10:43:06Z" + createdAt: "2024-10-14T12:21:27Z" description: OpenShift Data Foundation client operator enables consumption of storage services from a remote centralized OpenShift Data Foundation provider cluster. 
@@ -326,6 +326,17 @@ spec: - list - update - watch + - apiGroups: + - ramendr.openshift.io + resources: + - drclusterconfigs + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - security.openshift.io resources: diff --git a/cmd/main.go b/cmd/main.go index 1df18300..f9f2d91d 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -104,7 +104,6 @@ func main() { ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) - storageclustersSelector := fields.SelectorFromSet(fields.Set{"metadata.name": "storageclusters.ocs.openshift.io"}) subscriptionwebhookSelector := fields.SelectorFromSet(fields.Set{"metadata.name": templates.SubscriptionWebhookName}) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, @@ -114,10 +113,6 @@ func main() { LeaderElectionID: "7cb6f2e5.ocs.openshift.io", Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ - &extv1.CustomResourceDefinition{}: { - // only cache storagecluster crd - Field: storageclustersSelector, - }, &admrv1.ValidatingWebhookConfiguration{}: { // only cache our validation webhook Field: subscriptionwebhookSelector, diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 0075aa3e..8c2e7ee9 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -283,6 +283,17 @@ rules: - list - update - watch +- apiGroups: + - ramendr.openshift.io + resources: + - drclusterconfigs + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - security.openshift.io resources: diff --git a/hack/entrypoint.sh b/hack/entrypoint.sh index bc58ad14..0085cc91 100755 --- a/hack/entrypoint.sh +++ b/hack/entrypoint.sh @@ -3,7 +3,7 @@ RESTART_EXIT_CODE=42 while true; do - ./usr/local/bin/ocs-operator $@ + ./manager "$@" EXIT_CODE=$? 
if [ $EXIT_CODE -ne $RESTART_EXIT_CODE ]; then exit $EXIT_CODE diff --git a/internal/controller/storageclaim_controller.go b/internal/controller/storageclaim_controller.go index 1f323ad7..33d0bdda 100644 --- a/internal/controller/storageclaim_controller.go +++ b/internal/controller/storageclaim_controller.go @@ -147,6 +147,7 @@ func (r *StorageClaimReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch //+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotcontents,verbs=get;list;watch //+kubebuilder:rbac:groups=csi.ceph.io,resources=clientprofiles,verbs=get;list;update;create;watch;delete +//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;update;create;watch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/pkg/utils/predicates.go b/pkg/utils/predicates.go index 236410d6..02c10ca5 100644 --- a/pkg/utils/predicates.go +++ b/pkg/utils/predicates.go @@ -19,13 +19,13 @@ func CrdCreateAndDeletePredicate(log *logr.Logger, crdName string, crdExists boo return predicate.Funcs{ CreateFunc: func(_ event.CreateEvent) bool { if !crdExists { - log.Info("CustomResourceDefinition %s was Created.", crdName) + log.Info("CustomResourceDefinition was Created.", "CustomResourceDefinition", crdName) } return !crdExists }, DeleteFunc: func(_ event.DeleteEvent) bool { if crdExists { - log.Info("CustomResourceDefinition %s was Deleted.", crdName) + log.Info("CustomResourceDefinition was Deleted.", "CustomResourceDefinition", crdName) } return crdExists },