feat(lvm disk detection): add support to detect disks used by LVM localpv #619

Open · wants to merge 2 commits into base: develop
3 changes: 3 additions & 0 deletions blockdevice/blockdevice.go
@@ -431,6 +431,9 @@ const (

// Jiva
Jiva StorageEngine = "jiva"

// LVMLocalPV
LVMLocalPV StorageEngine = "lvm-localpv"
)

// Status is used to represent the status of the blockdevice
43 changes: 43 additions & 0 deletions cmd/ndm_daemonset/probe/addhandler.go
@@ -317,6 +317,13 @@ func (pe *ProbeEvent) handleUnmanagedDevices(bd blockdevice.BlockDevice, bdAPIList *apis.BlockDeviceList) (bool, error) {
} else if !ok {
return false, nil
}

// handle the case where the device is used by lvm localPV
if ok, err := pe.deviceInUseByLVMLocalPV(bd, bdAPIList); err != nil {
return ok, err
} else if !ok {
return false, nil
}
return true, nil
}

@@ -384,6 +391,42 @@ func (pe *ProbeEvent) deviceInUseByZFSLocalPV(bd blockdevice.BlockDevice, bdAPIList *apis.BlockDeviceList) (bool, error) {
return false, nil
}

// deviceInUseByLVMLocalPV checks if the device is in use by lvm localPV and returns true if further processing of
// the event is required. If the device has an lvm PV on it, a blockdevice resource will be created and the lvm PV tag
// will be added to the resource.
func (pe *ProbeEvent) deviceInUseByLVMLocalPV(bd blockdevice.BlockDevice, bdAPIList *apis.BlockDeviceList) (bool, error) {
if !bd.DevUse.InUse {
return true, nil
}

// not in use by lvm localpv
if bd.DevUse.UsedBy != blockdevice.LVMLocalPV {
return true, nil
}

klog.Infof("device: %s in use by lvm-localPV", bd.DevPath)

uuid, ok := generateUUID(bd)
if !ok {
klog.Errorf("unable to generate uuid for lvm-localPV device: %s", bd.DevPath)
return false, fmt.Errorf("error generating uuid for lvm-localPV disk: %s", bd.DevPath)
}

bd.UUID = uuid

deviceInfo := pe.Controller.NewDeviceInfoFromBlockDevice(&bd)
bdAPI := deviceInfo.ToDevice()
bdAPI.Labels[kubernetes.BlockDeviceTagLabel] = string(blockdevice.LVMLocalPV)

err := pe.Controller.CreateBlockDevice(bdAPI)
if err != nil {
klog.Errorf("unable to push %s (%s) to etcd", bd.UUID, bd.DevPath)
return false, err
}
klog.Infof("Pushed lvm-localPV device: %s (%s) to etcd", bd.UUID, bd.DevPath)
return false, nil
}

// upgradeDeviceInUseByCStor handles the upgrade if the device is used by cstor. returns true if further processing
// is required
func (pe *ProbeEvent) upgradeDeviceInUseByCStor(bd blockdevice.BlockDevice, bdAPIList *apis.BlockDeviceList) (bool, error) {
137 changes: 137 additions & 0 deletions cmd/ndm_daemonset/probe/addhandler_test.go
@@ -405,6 +405,143 @@ func TestDeviceInUseByZFSLocalPV(t *testing.T) {
}
}

func TestProbeEvent_deviceInUseByLVMLocalPV(t *testing.T) {
fakeFsID := "fake-fs-uuid"
fakeBD := blockdevice.BlockDevice{
FSInfo: blockdevice.FileSystemInformation{
FileSystemUUID: fakeFsID,
},
}
fakeUUID, _ := generateUUID(fakeBD)

tests := map[string]struct {
bd blockdevice.BlockDevice
bdAPIList *apis.BlockDeviceList
createdOrUpdatedBDName string
want bool
wantErr bool
}{
"device not in use": {
bd: blockdevice.BlockDevice{
DevUse: blockdevice.DeviceUsage{
InUse: false,
},
},
bdAPIList: &apis.BlockDeviceList{},
createdOrUpdatedBDName: "",
want: true,
wantErr: false,
},
"device in use, not by lvm localPV": {
bd: blockdevice.BlockDevice{
DevUse: blockdevice.DeviceUsage{
InUse: true,
UsedBy: blockdevice.CStor,
},
},
bdAPIList: &apis.BlockDeviceList{},
createdOrUpdatedBDName: "",
want: true,
wantErr: false,
},
"deviceType disk, used by lvm PV and is connected to the cluster for the first time": {
bd: blockdevice.BlockDevice{
Identifier: blockdevice.Identifier{
DevPath: "/dev/sda",
},
DeviceAttributes: blockdevice.DeviceAttribute{
DeviceType: blockdevice.BlockDeviceTypeDisk,
},
DevUse: blockdevice.DeviceUsage{
InUse: true,
UsedBy: blockdevice.LVMLocalPV,
},
FSInfo: blockdevice.FileSystemInformation{
FileSystemUUID: fakeFsID,
},
},
bdAPIList: &apis.BlockDeviceList{},
createdOrUpdatedBDName: fakeUUID,
want: false,
wantErr: false,
},
"deviceType disk, used by lvm PV and is moved from disconnected and reconnected to the node at a different path": {
bd: blockdevice.BlockDevice{
Identifier: blockdevice.Identifier{
DevPath: "/dev/sda",
},
DeviceAttributes: blockdevice.DeviceAttribute{
DeviceType: blockdevice.BlockDeviceTypeDisk,
},
DevUse: blockdevice.DeviceUsage{
InUse: true,
UsedBy: blockdevice.LVMLocalPV,
},
FSInfo: blockdevice.FileSystemInformation{
FileSystemUUID: fakeFsID,
},
},
bdAPIList: &apis.BlockDeviceList{
Items: []apis.BlockDevice{
{
ObjectMeta: metav1.ObjectMeta{
Name: fakeUUID,
},
Spec: apis.DeviceSpec{
Path: "/dev/sdb",
},
},
},
},
createdOrUpdatedBDName: fakeUUID,
want: false,
wantErr: false,
},
}
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
s := scheme.Scheme
s.AddKnownTypes(apis.GroupVersion, &apis.BlockDevice{})
s.AddKnownTypes(apis.GroupVersion, &apis.BlockDeviceList{})
cl := fake.NewFakeClientWithScheme(s)

// initialize client with all the bd resources
for _, bdAPI := range tt.bdAPIList.Items {
cl.Create(context.TODO(), &bdAPI)
}

ctrl := &controller.Controller{
Clientset: cl,
}
pe := &ProbeEvent{
Controller: ctrl,
}
got, err := pe.deviceInUseByLVMLocalPV(tt.bd, tt.bdAPIList)
if (err != nil) != tt.wantErr {
t.Errorf("deviceInUseByLVMLocalPV() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("deviceInUseByLVMLocalPV() got = %v, want %v", got, tt.want)
}

// check if a BD has been created or updated
if len(tt.createdOrUpdatedBDName) != 0 {
gotBDAPI := &apis.BlockDevice{}
err := cl.Get(context.TODO(), client.ObjectKey{Name: tt.createdOrUpdatedBDName}, gotBDAPI)
if err != nil {
t.Errorf("error in getting blockdevice %s", tt.createdOrUpdatedBDName)
}
// verify the block-device-tag on the resource, also verify the path and node name
assert.Equal(t, string(blockdevice.LVMLocalPV), gotBDAPI.GetLabels()[kubernetes.BlockDeviceTagLabel])
assert.Equal(t, tt.bd.DevPath, gotBDAPI.Spec.Path)
assert.Equal(t, tt.bd.NodeAttributes[blockdevice.NodeName], gotBDAPI.Spec.NodeAttributes.NodeName)
}
})
}
}
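
The new table-driven test runs against the controller-runtime fake client, so no live cluster is needed; assuming the repository's standard layout, it can be run on its own with go test ./cmd/ndm_daemonset/probe -run TestProbeEvent_deviceInUseByLVMLocalPV.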

func TestIsParentDeviceInUse(t *testing.T) {
cache := map[string]blockdevice.BlockDevice{
"/dev/sda": {
13 changes: 13 additions & 0 deletions cmd/ndm_daemonset/probe/usedbyprobe.go
@@ -45,6 +45,7 @@ const (
k8sLocalVolumePath1 = "kubernetes.io/local-volume"
k8sLocalVolumePath2 = "kubernetes.io~local-volume"
zfsFileSystemLabel = "zfs_member"
lvmFileSystemLabel = "LVM2_member"
)

var (
@@ -149,6 +150,18 @@ func (sp *usedbyProbe) FillBlockDeviceDetails(blockDevice *blockdevice.BlockDevice) {
}
}

// checking for lvm localPV
usedByProbe := newUsedByProbe(blockDevice.DevPath)
// check for LVM file system
fstype := usedByProbe.BlkidIdentifier.GetOnDiskFileSystem()
Review thread on this check:

Contributor: Is this a reliable way to check whether the device is in use by LVM localPV? Nodes with a manually configured LVM setup will have the same LVM filesystem signature on their disks. We need to check whether this method lets us distinguish between LVM localPV and LVM that has been set up by the user.

Contributor: The same issue exists for cstor and zfs-localpv. To identify that case, we check if the device is exclusively locked, which happens only when kernel ZFS is using the disk. (A sketch of such a check follows this thread.)

Author: "Because nodes having LVM setup will also have the same lvm filesystem on the disks." By this, do you mean disks that are used manually by the LVM utils?
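
The exclusive-lock check the contributor describes can be illustrated with a minimal sketch (a hypothetical helper, not code from this PR): on Linux, opening a block device with O_EXCL requests an exclusive claim and fails with EBUSY while the kernel already holds the device, for example when device-mapper has claimed it for an active LVM physical volume.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// isExclusivelyLocked reports whether the kernel already holds devPath
// exclusively. O_EXCL on a block device does not create anything; it
// requests an exclusive claim and fails with EBUSY if one is held.
func isExclusivelyLocked(devPath string) (bool, error) {
	fd, err := unix.Open(devPath, unix.O_RDONLY|unix.O_EXCL, 0)
	if err == unix.EBUSY {
		// a kernel subsystem (device-mapper, zfs, a mount) owns the device
		return true, nil
	}
	if err != nil {
		return false, err
	}
	unix.Close(fd)
	return false, nil
}

func main() {
	locked, err := isExclusivelyLocked("/dev/sdb")
	if err != nil {
		fmt.Println("check failed:", err)
		return
	}
	fmt.Println("exclusively locked:", locked)
}

A check along these lines would separate an LVM PV actively claimed by the kernel from a disk that merely carries the LVM2_member signature.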


if fstype == lvmFileSystemLabel {
blockDevice.DevUse.InUse = true
blockDevice.DevUse.UsedBy = blockdevice.LVMLocalPV
klog.V(4).Infof("device: %s Used by: %s filled by used-by probe", blockDevice.DevPath, blockDevice.DevUse.UsedBy)
return
}

// create a device identifier for reading the spdk super block from the disk
spdkIdentifier := &spdk.DeviceIdentifier{
DevPath: blockDevice.DevPath,
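
For reference, LVM2_member is the superblock TYPE that libblkid reports for an LVM physical volume. The probe reads it through the project's BlkidIdentifier wrapper; a standalone sketch of the same check, shelling out to the blkid CLI instead (a hypothetical helper; reading the device usually requires root), looks like this:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// onDiskFSType reads the superblock TYPE of a device via the blkid CLI.
// An LVM physical volume reports "LVM2_member".
func onDiskFSType(devPath string) (string, error) {
	// blkid exits non-zero when no signature is found, which surfaces as err
	out, err := exec.Command("blkid", "-o", "value", "-s", "TYPE", devPath).Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	fstype, err := onDiskFSType("/dev/sdb")
	if err != nil {
		fmt.Println("blkid failed:", err)
		return
	}
	if fstype == "LVM2_member" {
		fmt.Println("/dev/sdb carries an LVM PV signature")
	}
}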