util: add support to configure mirror daemon count
Currently we assume that only one rbd mirror daemon
is running on the ceph cluster, but that is not true
in many cases; there can be more than one. This PR
makes the daemon count a configurable parameter.

fixes: ceph#4312

Signed-off-by: Madhu Rajanna <[email protected]>
Madhu-1 authored and mergify[bot] committed Apr 22, 2024
1 parent 51d1d46 commit 4c2d2ca
Showing 6 changed files with 116 additions and 1 deletion.
1 change: 1 addition & 0 deletions charts/ceph-csi-rbd/values.yaml
@@ -27,6 +27,7 @@ serviceAccounts:
# - "<MONValue2>"
# rbd:
# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net"
# mirrorDaemonCount: 1
# readAffinity:
# enabled: true
# crushLocationLabels:
3 changes: 3 additions & 0 deletions deploy/csi-config-map-sample.yaml
@@ -19,6 +19,8 @@ kind: ConfigMap
# NOTE: The given radosNamespace must already exist in the pool.
# NOTE: Make sure you don't add radosNamespace option to a currently in use
# configuration as it will cause issues.
# The "rbd.mirrorDaemonCount" is optional and represents the total number of
# RBD mirror daemons running on the ceph cluster.
# The field "cephFS.subvolumeGroup" is optional and defaults to "csi".
# NOTE: The given subvolumeGroup must already exist in the filesystem.
# The "cephFS.netNamespaceFilePath" fields are the various network namespace
@@ -64,6 +66,7 @@ data:
"rbd": {
"netNamespaceFilePath": "<kubeletRootPath>/plugins/rbd.csi.ceph.com/net",
"radosNamespace": "<rados-namespace>",
"mirrorDaemonCount": 1,
},
"monitors": [
"<MONValue1>",
6 changes: 5 additions & 1 deletion internal/rbd/rbd_util.go
@@ -553,9 +553,13 @@ func (ri *rbdImage) isInUse() (bool, error) {
// because we opened the image, there is at least one watcher
defaultWatchers := 1
if mirrorInfo.Primary {
count, err := util.GetRBDMirrorDaemonCount(util.CsiConfigFile, ri.ClusterID)
if err != nil {
return false, err
}
// if rbd mirror daemon is running, a watcher will be added by the rbd
// mirror daemon for mirrored images.
defaultWatchers++
defaultWatchers += count
}

return len(watchers) > defaultWatchers, nil
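For illustration, a minimal standalone sketch of the watcher arithmetic introduced by this hunk (expectedWatchers is a hypothetical helper, not part of the driver): with the default count of 1 it behaves exactly like the previous hard-coded defaultWatchers++, and with two mirror daemons a primary mirrored image is only reported as in use once more than three watchers (one for our own open plus two daemons) are present.

// expectedWatchers is a hypothetical helper illustrating the check above:
// the caller always holds one watcher because it opened the image, and on a
// primary mirrored image every rbd-mirror daemon adds one more watcher.
func expectedWatchers(primary bool, mirrorDaemonCount int) int {
	watchers := 1 // our own open of the image
	if primary {
		watchers += mirrorDaemonCount
	}

	return watchers
}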
17 changes: 17 additions & 0 deletions internal/util/csiconfig.go
@@ -45,6 +45,7 @@ const (
"clusterID": "<cluster-id>",
"rbd": {
"radosNamespace": "<rados-namespace>"
"mirrorDaemonCount": 1
},
"monitors": [
"<monitor-value>",
@@ -105,6 +106,22 @@ func GetRadosNamespace(pathToConfig, clusterID string) (string, error) {
return cluster.RBD.RadosNamespace, nil
}

// GetRBDMirrorDaemonCount returns the number of rbd mirror daemons configured
// for the given clusterID.
func GetRBDMirrorDaemonCount(pathToConfig, clusterID string) (int, error) {
cluster, err := readClusterInfo(pathToConfig, clusterID)
if err != nil {
return 0, err
}

// if it is empty, set the default to 1 which is most common in a cluster.
if cluster.RBD.MirrorDaemonCount == 0 {
return 1, nil
}

return cluster.RBD.MirrorDaemonCount, nil
}

// CephFSSubvolumeGroup returns the subvolumeGroup for CephFS volumes. If not set, it returns the default value "csi".
func CephFSSubvolumeGroup(pathToConfig, clusterID string) (string, error) {
cluster, err := readClusterInfo(pathToConfig, clusterID)
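As a usage sketch, a caller inside the driver resolves the per-cluster count and folds it into its expected watcher total; defaulting to 1 when mirrorDaemonCount is unset keeps the long-standing single-daemon assumption for existing deployments (the clusterID below is illustrative):

// Illustrative call; errors from an unreadable or malformed config entry are
// propagated to the caller rather than silently falling back to the default.
count, err := util.GetRBDMirrorDaemonCount(util.CsiConfigFile, "cluster-1")
if err != nil {
	return false, err
}
defaultWatchers += count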
88 changes: 88 additions & 0 deletions internal/util/csiconfig_test.go
@@ -17,11 +17,14 @@ limitations under the License.
package util

import (
"bytes"
"encoding/json"
"os"
"testing"

cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes"

"github.com/stretchr/testify/require"
)

var (
@@ -530,3 +533,88 @@ func TestGetCephFSMountOptions(t *testing.T) {
})
}
}

func TestGetRBDMirrorDaemonCount(t *testing.T) {
t.Parallel()
tests := []struct {
name string
clusterID string
want int
}{
{
name: "get rbd mirror daemon count for cluster-1",
clusterID: "cluster-1",
want: 2,
},
{
name: "get rbd mirror daemon count for cluster-2",
clusterID: "cluster-2",
want: 4,
},
{
name: "when rbd mirror daemon count is empty",
clusterID: "cluster-3",
want: 1, // default mirror daemon count
},
}

csiConfig := []cephcsi.ClusterInfo{
{
ClusterID: "cluster-1",
Monitors: []string{"ip-1", "ip-2"},
RBD: cephcsi.RBD{
MirrorDaemonCount: 2,
},
},
{
ClusterID: "cluster-2",
Monitors: []string{"ip-3", "ip-4"},
RBD: cephcsi.RBD{
MirrorDaemonCount: 4,
},
},
{
ClusterID: "cluster-3",
Monitors: []string{"ip-5", "ip-6"},
},
}
csiConfigFileContent, err := json.Marshal(csiConfig)
if err != nil {
t.Errorf("failed to marshal csi config info %v", err)
}
tmpConfPath := t.TempDir() + "/ceph-csi.json"
err = os.WriteFile(tmpConfPath, csiConfigFileContent, 0o600)
if err != nil {
t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
}
for _, tt := range tests {
ts := tt
t.Run(ts.name, func(t *testing.T) {
t.Parallel()
var got int
got, err = GetRBDMirrorDaemonCount(tmpConfPath, ts.clusterID)
if err != nil {
t.Errorf("GetRBDMirrorDaemonCount() error = %v", err)

return
}
if got != ts.want {
t.Errorf("GetRBDMirrorDaemonCount() = %v, want %v", got, ts.want)
}
})
}

// when mirrorDaemonCount is set as string
csiConfigFileContent = bytes.Replace(
csiConfigFileContent,
[]byte(`"mirrorDaemonCount":2`),
[]byte(`"mirrorDaemonCount":"2"`),
1)
tmpCSIConfPath := t.TempDir() + "/ceph-csi.json"
err = os.WriteFile(tmpCSIConfPath, csiConfigFileContent, 0o600)
if err != nil {
t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
}
_, err = GetRBDMirrorDaemonCount(tmpCSIConfPath, "test")
require.Error(t, err)
}
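The final assertion above leans on standard encoding/json behaviour: a quoted value cannot be unmarshalled into an int field, so a string mirrorDaemonCount surfaces as an error instead of silently becoming the default. A self-contained sketch of that behaviour (rbdConfig is a stand-in type, not the driver's struct):

package main

import (
	"encoding/json"
	"fmt"
)

// rbdConfig is a stand-in for the driver's per-cluster RBD config section.
type rbdConfig struct {
	MirrorDaemonCount int `json:"mirrorDaemonCount"`
}

func main() {
	var cfg rbdConfig
	// Decoding a quoted number into an int field fails, which is what the
	// "when mirrorDaemonCount is set as string" test case relies on.
	err := json.Unmarshal([]byte(`{"mirrorDaemonCount":"2"}`), &cfg)
	fmt.Println(err)
}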

Some generated files are not rendered by default.
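The sixth changed file is the generated API type under api/deploy/kubernetes, which is not rendered above. Judging from the test's use of cephcsi.RBD{MirrorDaemonCount: ...} and the mirrorDaemonCount key in the sample config, the addition presumably looks roughly like the sketch below; this is an assumption about the unrendered file, not a quote of it.

// Assumed shape of the RBD section in api/deploy/kubernetes (not rendered
// in this diff); only MirrorDaemonCount is new, the other fields exist today.
type RBD struct {
	NetNamespaceFilePath string `json:"netNamespaceFilePath"`
	RadosNamespace       string `json:"radosNamespace"`
	// MirrorDaemonCount is the number of rbd-mirror daemons running on the
	// Ceph cluster; the driver treats an unset value as 1.
	MirrorDaemonCount int `json:"mirrorDaemonCount"`
}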
