Skip to content

Commit

Permalink
Update csiaddons to v0.5.0 & point volrep to csiaddons
Browse files Browse the repository at this point in the history
This commit updates csiaddons to v0.5.0 and other deps.
This also points volrep to kubernetes-csi-addons repo now.

Signed-off-by: Rakshith R <[email protected]>
  • Loading branch information
Rakshith-R committed Sep 7, 2022
1 parent 14d43fb commit e00aef2
Show file tree
Hide file tree
Showing 11 changed files with 235 additions and 299 deletions.
57 changes: 32 additions & 25 deletions controllers/drcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"

csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/v1alpha1"
csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
"github.com/go-logr/logr"
ramen "github.com/ramendr/ramen/api/v1alpha1"
"github.com/ramendr/ramen/controllers/util"
Expand Down Expand Up @@ -313,17 +313,17 @@ func (u *drclusterInstance) finalizerRemove() error {
}

// TODO:
// 1) For now by default fenceStatus is ClusterFenceStateUnfenced.
// However, we need to handle explicit unfencing operation to unfence
// a fenced cluster below, by deleting the fencing CR created by
// ramen.
//
// 2) How to differentiate between ClusterFenceStateUnfenced being
// set because a manually fenced cluster is manually unfenced against the
// requirement to unfence a cluster that has been fenced by ramen.
// 1. For now by default fenceStatus is ClusterFenceStateUnfenced.
// However, we need to handle explicit unfencing operation to unfence
// a fenced cluster below, by deleting the fencing CR created by
// ramen.
//
// 3) Handle Ramen driven fencing here
// 2. How to differentiate between ClusterFenceStateUnfenced being
// set because a manually fenced cluster is manually unfenced against the
// requirement to unfence a cluster that has been fenced by ramen.
//
// 3) Handle Ramen driven fencing here
func (u *drclusterInstance) clusterFenceHandle() (bool, error) {
switch u.object.Spec.ClusterFence {
case ramen.ClusterFenceStateUnfenced:
Expand Down Expand Up @@ -434,16 +434,19 @@ func (u *drclusterInstance) clusterUnfence() (bool, error) {
return u.cleanClusters([]ramen.DRCluster{*u.object, peerCluster})
}

//
// if the fencing CR (via MCV) exists; then
// if the status of fencing CR shows fenced
// return dontRequeue, nil
// else
// return requeue, error
// endif
//
// if the status of fencing CR shows fenced
// return dontRequeue, nil
// else
// return requeue, error
// endif
//
// else
// Create the fencing CR MW with Fenced state
// return requeue, nil
//
// Create the fencing CR MW with Fenced state
// return requeue, nil
//
// endif
func (u *drclusterInstance) fenceClusterOnCluster(peerCluster *ramen.DRCluster) (bool, error) {
if !u.isFencingOrFenced() {
Expand Down Expand Up @@ -497,16 +500,19 @@ func (u *drclusterInstance) fenceClusterOnCluster(peerCluster *ramen.DRCluster)
return false, nil
}

//
// if the fencing CR (via MCV) exists; then
// if the status of fencing CR shows unfenced
// return dontRequeue, nil
// else
// return requeue, error
// endif
//
// if the status of fencing CR shows unfenced
// return dontRequeue, nil
// else
// return requeue, error
// endif
//
// else
// Create the fencing CR MW with Unfenced state
// return requeue, nil
//
// Create the fencing CR MW with Unfenced state
// return requeue, nil
//
// endif
func (u *drclusterInstance) unfenceClusterOnCluster(peerCluster *ramen.DRCluster) (bool, error) {
if !u.isUnfencingOrUnfenced() {
Expand Down Expand Up @@ -1005,6 +1011,7 @@ func setDRClusterUnfencingFailedCondition(conditions *[]metav1.Condition, observ
// fails. Since, cleaning is always called after a successful
// Unfence operation, unfence = true, fence = false, clean = false
// TODO: Remove the linter skip when this function is used
//
//nolint:deadcode,unused
func setDRClusterCleaningFailedCondition(conditions *[]metav1.Condition, observedGeneration int64, message string) {
setStatusCondition(conditions, metav1.Condition{
Expand Down
2 changes: 1 addition & 1 deletion controllers/drcluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ package controllers_test
import (
"context"

csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/v1alpha1"
csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gstruct"
Expand Down
2 changes: 1 addition & 1 deletion controllers/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"

volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1"
volrep "github.com/csi-addons/volume-replication-operator/api/v1alpha1"
volrep "github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1"
snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
ocmclv1 "github.com/open-cluster-management/api/cluster/v1"
ocmworkv1 "github.com/open-cluster-management/api/work/v1"
Expand Down
16 changes: 9 additions & 7 deletions controllers/util/mcv_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"

csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/v1alpha1"
csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
rmn "github.com/ramendr/ramen/api/v1alpha1"
viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
Expand Down Expand Up @@ -150,9 +150,10 @@ func (m ManagedClusterViewGetterImpl) GetNamespaceFromManagedCluster(
/*
Description: queries a managed cluster for a resource type, and populates a variable with the results.
Requires:
1) meta: information of the new/existing resource; defines which cluster(s) to search
2) viewscope: query information for managed cluster resource. Example: resource, name.
3) interface: empty variable to populate results into
1. meta: information of the new/existing resource; defines which cluster(s) to search
2. viewscope: query information for managed cluster resource. Example: resource, name.
3. interface: empty variable to populate results into
Returns: error if encountered (nil if no error occurred). See results on interface object.
*/
func (m ManagedClusterViewGetterImpl) getManagedClusterResource(
Expand Down Expand Up @@ -198,9 +199,10 @@ func (m ManagedClusterViewGetterImpl) getManagedClusterResource(
/*
Description: create a new ManagedClusterView object, or update the existing one with the same name.
Requires:
1) meta: specifies MangedClusterView name and managed cluster search information
2) viewscope: once the managed cluster is found, use this information to find the resource.
Optional params: Namespace, Resource, Group, Version, Kind. Resource can be used by itself, Kind requires Version
1. meta: specifies ManagedClusterView name and managed cluster search information
2. viewscope: once the managed cluster is found, use this information to find the resource.
Optional params: Namespace, Resource, Group, Version, Kind. Resource can be used by itself, Kind requires Version
Returns: ManagedClusterView, error
*/
func (m ManagedClusterViewGetterImpl) getOrCreateManagedClusterView(
Expand Down
2 changes: 1 addition & 1 deletion controllers/util/mw_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ import (
dto "github.com/prometheus/client_model/go"
"sigs.k8s.io/controller-runtime/pkg/metrics"

csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/v1alpha1"
csiaddonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
rmn "github.com/ramendr/ramen/api/v1alpha1"
)

Expand Down
2 changes: 1 addition & 1 deletion controllers/volumereplicationgroup_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ import (
"github.com/go-logr/logr"
"golang.org/x/time/rate"

volrep "github.com/csi-addons/volume-replication-operator/api/v1alpha1"
volrep "github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1"
"github.com/google/uuid"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
Expand Down
102 changes: 56 additions & 46 deletions controllers/vrg_volrep.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/go-logr/logr"

volrep "github.com/csi-addons/volume-replication-operator/api/v1alpha1"
volrepController "github.com/csi-addons/volume-replication-operator/controllers"
volrep "github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1"
volrepController "github.com/csi-addons/kubernetes-csi-addons/controllers/replication.storage"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
Expand Down Expand Up @@ -271,13 +271,13 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim,

// This function indicates whether to proceed with the pvc processing
// or not. It mainly checks the following things.
// - Whether pvc is bound or not. If not bound, then no need to
// process the pvc any further. It can be skipped until it is ready.
// - Whether the pvc is being deleted and VR protection finalizer is
// not there. If the finalizer is there, then VolumeReplicationGroup
// need to remove the finalizer for the pvc being deleted. However,
// if the finalizer is not there, then no need to process the pvc
// any further and it can be skipped. The pvc will go away eventually.
// - Whether pvc is bound or not. If not bound, then no need to
// process the pvc any further. It can be skipped until it is ready.
// - Whether the pvc is being deleted and VR protection finalizer is
// not there. If the finalizer is there, then VolumeReplicationGroup
// need to remove the finalizer for the pvc being deleted. However,
// if the finalizer is not there, then no need to process the pvc
// any further and it can be skipped. The pvc will go away eventually.
func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) {
if pvc.Status.Phase != corev1.ClaimBound {
log.Info("Skipping handling of VR as PersistentVolumeClaim is not bound", "pvcPhase", pvc.Status.Phase)
Expand Down Expand Up @@ -782,9 +782,9 @@ func (v *VRGInstance) DeletePVs(s3ProfileName string) (err error) {
// processVRAsPrimary processes VR to change its state to primary, with the assumption that the
// related PVC is prepared for VR protection
// Return values are:
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during processing
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during processing
func (v *VRGInstance) processVRAsPrimary(vrNamespacedName types.NamespacedName, log logr.Logger) (bool, bool, error) {
if v.instance.Spec.Async != nil {
return v.createOrUpdateVR(vrNamespacedName, volrep.Primary, log)
Expand All @@ -811,9 +811,9 @@ func (v *VRGInstance) processVRAsPrimary(vrNamespacedName types.NamespacedName,
// processVRAsSecondary processes VR to change its state to secondary, with the assumption that the
// related PVC is prepared for VR as secondary
// Return values are:
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during processing
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during processing
func (v *VRGInstance) processVRAsSecondary(vrNamespacedName types.NamespacedName, log logr.Logger) (bool, bool, error) {
if v.instance.Spec.Async != nil {
return v.createOrUpdateVR(vrNamespacedName, volrep.Secondary, log)
Expand Down Expand Up @@ -845,9 +845,9 @@ func (v *VRGInstance) processVRAsSecondary(vrNamespacedName types.NamespacedName
// would get a reconcile. And then the conditions for the appropriate Protected PVC can
// be set as either Replicating or Error.
// Return values are:
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during processing
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during processing
func (v *VRGInstance) createOrUpdateVR(vrNamespacedName types.NamespacedName,
state volrep.ReplicationState, log logr.Logger) (bool, bool, error) {
const requeue = true
Expand Down Expand Up @@ -908,9 +908,9 @@ func (v *VRGInstance) autoResync(state volrep.ReplicationState) bool {
}

// updateVR updates the VR to the desired state and returns,
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during the process of updating the resource
// - a boolean indicating if a reconcile requeue is required
// - a boolean indicating if VR is already at the desired state
// - any errors during the process of updating the resource
func (v *VRGInstance) updateVR(volRep *volrep.VolumeReplication,
state volrep.ReplicationState, log logr.Logger) (bool, bool, error) {
const requeue = true
Expand Down Expand Up @@ -1126,9 +1126,9 @@ func (v *VRGInstance) checkVRStatus(volRep *volrep.VolumeReplication) bool {

// validateVRStatus validates if the VolumeReplication resource has the desired status for the
// current generation and returns true if so, false otherwise
// - When replication state is Primary, only Completed condition is checked.
// - When replication state is Secondary, all 3 conditions for Completed/Degraded/Resyncing is
// checked and ensured healthy.
// - When replication state is Primary, only Completed condition is checked.
// - When replication state is Secondary, all 3 conditions for Completed/Degraded/Resyncing is
// checked and ensured healthy.
func (v *VRGInstance) validateVRStatus(volRep *volrep.VolumeReplication, state ramendrv1alpha1.ReplicationState) bool {
var (
stateString string
Expand Down Expand Up @@ -1178,12 +1178,12 @@ func (v *VRGInstance) validateVRStatus(volRep *volrep.VolumeReplication, state r
// validateAdditionalVRStatusForSecondary returns true if resync status is complete as secondary, false otherwise
// Return available if resync is happening as secondary or resync is complete as secondary.
// i.e. For VolRep the following conditions should be met
// 1) Data Sync is happening
// VolRep.Status.Conditions[Degraded].Status = True &&
// VolRep.Status.Conditions[Resyncing].Status = True
// 2) Data Sync is complete.
// VolRep.Status.Conditions[Degraded].Status = False &&
// VolRep.Status.Conditions[Resyncing].Status = False
// 1. Data Sync is happening
// VolRep.Status.Conditions[Degraded].Status = True &&
// VolRep.Status.Conditions[Resyncing].Status = True
// 2. Data Sync is complete.
// VolRep.Status.Conditions[Degraded].Status = False &&
// VolRep.Status.Conditions[Resyncing].Status = False
//
// With 1st condition being met,
// ProtectedPVC.Conditions[DataReady] = True
Expand Down Expand Up @@ -1772,37 +1772,47 @@ func (v *VRGInstance) addPVRestoreAnnotation(pv *corev1.PersistentVolume) {
pv.ObjectMeta.Annotations[PVRestoreAnnotation] = "True"
}

//
// Follow this logic to update VRG (and also ProtectedPVC) conditions for VolRep
// while reconciling VolumeReplicationGroup resource.
//
// For both Primary and Secondary:
// if getting VolRep fails and volrep does not exist:
// ProtectedPVC.conditions.Available.Status = False
// ProtectedPVC.conditions.Available.Reason = Progressing
// return
//
// ProtectedPVC.conditions.Available.Status = False
// ProtectedPVC.conditions.Available.Reason = Progressing
// return
//
// if getting VolRep fails and some other error:
// ProtectedPVC.conditions.Available.Status = Unknown
// ProtectedPVC.conditions.Available.Reason = Error
//
// ProtectedPVC.conditions.Available.Status = Unknown
// ProtectedPVC.conditions.Available.Reason = Error
//
// The if condition check below helps in understanding whether
// promotion/demotion has been successfully completed or not.
// if VolRep.Status.Conditions[Completed].Status == True
// ProtectedPVC.conditions.Available.Status = True
// ProtectedPVC.conditions.Available.Reason = Replicating
//
// ProtectedPVC.conditions.Available.Status = True
// ProtectedPVC.conditions.Available.Reason = Replicating
//
// else
// ProtectedPVC.conditions.Available.Status = False
// ProtectedPVC.conditions.Available.Reason = Error
//
// ProtectedPVC.conditions.Available.Status = False
// ProtectedPVC.conditions.Available.Reason = Error
//
// if all ProtectedPVCs are Replicating, then
// VRG.conditions.Available.Status = true
// VRG.conditions.Available.Reason = Replicating
//
// VRG.conditions.Available.Status = true
// VRG.conditions.Available.Reason = Replicating
//
// if at least one ProtectedPVC.conditions[Available].Reason == Error
// VRG.conditions.Available.Status = false
// VRG.conditions.Available.Reason = Error
//
// VRG.conditions.Available.Status = false
// VRG.conditions.Available.Reason = Error
//
// if no ProtectedPVC is in error and at least one is progressing, then
// VRG.conditions.Available.Status = false
// VRG.conditions.Available.Reason = Progressing
//
// VRG.conditions.Available.Status = false
// VRG.conditions.Available.Reason = Progressing
//
//nolint:funlen
func (v *VRGInstance) aggregateVolRepDataReadyCondition() {
Expand Down
7 changes: 4 additions & 3 deletions controllers/vrg_volrep_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@ import (
"strings"
"time"

volrep "github.com/csi-addons/volume-replication-operator/api/v1alpha1"
volrepController "github.com/csi-addons/volume-replication-operator/controllers"
volrep "github.com/csi-addons/kubernetes-csi-addons/apis/replication.storage/v1alpha1"
volrepController "github.com/csi-addons/kubernetes-csi-addons/controllers/replication.storage"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
Expand Down Expand Up @@ -573,8 +573,9 @@ type template struct {
s3Profiles []string
}

//nolint:gosec
// we want the math rand version here and not the crypto rand. This way we can debug the tests by repeating the seed.
//
//nolint:gosec
func newRandomNamespaceSuffix() string {
randomSuffix := make([]byte, namespaceLen)

Expand Down
Loading

0 comments on commit e00aef2

Please sign in to comment.