From 56fcbd2d9110ce2c5c7289376a5cf927985e52f5 Mon Sep 17 00:00:00 2001
From: MatthieuFin
Date: Tue, 13 Feb 2024 15:23:30 +0100
Subject: [PATCH 1/7] feat(cinder-csi-plugin): add support multi cloud openstack cluster (#2035)

* feat(cinder-csi-plugin-controller): add support for the --cloud-name arg, which permits managing multiple config Global sections

* feat(cinder-csi-plugin-controller): add support for the key `cloud` in the secret specified in a storageClass, to reference a specific config Global section

* feat(cinder-csi-plugin-node): add support for the --cloud-name arg, which permits specifying one of the config Global sections

Signed-off-by: MatthieuFin
---
 cmd/cinder-csi-plugin/main.go | 21 +-
 pkg/csi/cinder/controllerserver.go | 266 ++++++++++++++----
 pkg/csi/cinder/controllerserver_test.go | 15 +-
 pkg/csi/cinder/driver.go | 4 +-
 pkg/csi/cinder/nodeserver_test.go | 18 +-
 pkg/csi/cinder/openstack/fixtures/clouds.yaml | 22 ++
 pkg/csi/cinder/openstack/openstack.go | 55 ++--
 pkg/csi/cinder/openstack/openstack_test.go | 133 +++++++--
 pkg/csi/cinder/utils.go | 4 +-
 tests/sanity/cinder/sanity_test.go | 6 +-
 10 files changed, 419 insertions(+), 125 deletions(-)

diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go
index 6ce8556689..537fdb940e 100644
--- a/cmd/cinder-csi-plugin/main.go
+++ b/cmd/cinder-csi-plugin/main.go
@@ -34,6 +34,7 @@ var (
 	endpoint string
 	nodeID string
 	cloudConfig []string
+	cloudNames []string
 	cluster string
 	httpEndpoint string
 	provideControllerService bool
@@ -65,6 +66,8 @@ func main() {
 		klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err)
 	}
 
+	cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "CSI driver cloud name in config files. This option can be given multiple times to manage multiple openstack clouds")
+
 	cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.")
 	cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, will listen (example: `:8080`).
The default is empty string, which means the server is disabled.") @@ -82,14 +85,18 @@ func handle() { d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster}) openstack.InitOpenStackProvider(cloudConfig, httpEndpoint) - cloud, err := openstack.GetOpenStackProvider() - if err != nil { - klog.Warningf("Failed to GetOpenStackProvider: %v", err) - return + var err error + clouds := make(map[string]openstack.IOpenStack) + for _, cloudName := range cloudNames { + clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName) + if err != nil { + klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err) + return + } } if provideControllerService { - d.SetupControllerService(cloud) + d.SetupControllerService(clouds) } if provideNodeService { @@ -97,9 +104,9 @@ func handle() { mount := mount.GetMountProvider() //Initialize Metadata - metadata := metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder) + metadata := metadata.GetMetadataProvider(clouds[cloudNames[0]].GetMetadataOpts().SearchOrder) - d.SetupNodeService(cloud, mount, metadata) + d.SetupNodeService(clouds[cloudNames[0]], mount, metadata) } d.Run() diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index cecc753c35..86d5066520 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -18,7 +18,9 @@ package cinder import ( "context" + "encoding/json" "fmt" + "sort" "strconv" "github.com/container-storage-interface/spec/lib/go/csi" @@ -26,6 +28,7 @@ import ( "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/snapshots" "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" + "golang.org/x/exp/maps" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" @@ -38,7 +41,7 @@ import ( type controllerServer struct { Driver *Driver - Cloud openstack.IOpenStack + Clouds map[string]openstack.IOpenStack } const ( @@ -48,6 +51,13 @@ const ( func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { klog.V(4).Infof("CreateVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[CreateVolume] specified cloud undefined") + } + // Volume Name volName := req.GetName() volCapabilities := req.GetVolumeCapabilities() @@ -80,7 +90,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol } } - cloud := cs.Cloud ignoreVolumeAZ := cloud.GetBlockStorageOpts().IgnoreVolumeAZ // Verify a volume with the provided name doesn't already exist for this tenant @@ -187,12 +196,19 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { klog.V(4).Infof("DeleteVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Errorf(codes.InvalidArgument, "[DeleteVolume] specified cloud \"%s\" undefined", volCloud) + } + // Volume Delete volID := req.GetVolumeId() if len(volID) == 0 { return nil, status.Error(codes.InvalidArgument, "DeleteVolume 
Volume ID must be provided") } - err := cs.Cloud.DeleteVolume(volID) + err := cloud.DeleteVolume(volID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Volume %s is already deleted.", volID) @@ -210,6 +226,13 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { klog.V(4).Infof("ControllerPublishVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ControllerPublishVolume] specified cloud undefined") + } + // Volume Attach instanceID := req.GetNodeId() volumeID := req.GetVolumeId() @@ -225,7 +248,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Error(codes.InvalidArgument, "[ControllerPublishVolume] Volume capability must be provided") } - _, err := cs.Cloud.GetVolume(volumeID) + _, err := cloud.GetVolume(volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Volume %s not found", volumeID) @@ -233,7 +256,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] get volume failed with error %v", err) } - _, err = cs.Cloud.GetInstanceByID(instanceID) + _, err = cloud.GetInstanceByID(instanceID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Instance %s not found", instanceID) @@ -241,20 +264,20 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] GetInstanceByID failed with error %v", err) } - _, err = cs.Cloud.AttachVolume(instanceID, volumeID) + _, err = cloud.AttachVolume(instanceID, volumeID) if err != nil { klog.Errorf("Failed to AttachVolume: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] Attach Volume failed with error %v", err) } - err = cs.Cloud.WaitDiskAttached(instanceID, volumeID) + err = cloud.WaitDiskAttached(instanceID, volumeID) if err != nil { klog.Errorf("Failed to WaitDiskAttached: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to attach volume: %v", err) } - devicePath, err := cs.Cloud.GetAttachmentDiskPath(instanceID, volumeID) + devicePath, err := cloud.GetAttachmentDiskPath(instanceID, volumeID) if err != nil { klog.Errorf("Failed to GetAttachmentDiskPath: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to get device path of attached volume: %v", err) @@ -274,6 +297,13 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { klog.V(4).Infof("ControllerUnpublishVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ControllerUnpublishVolume] specified cloud undefined") + } + // Volume Detach instanceID := req.GetNodeId() 
volumeID := req.GetVolumeId() @@ -281,7 +311,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "[ControllerUnpublishVolume] Volume ID must be provided") } - _, err := cs.Cloud.GetInstanceByID(instanceID) + _, err := cloud.GetInstanceByID(instanceID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because node %s does not exist", volumeID, instanceID) @@ -290,7 +320,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return nil, status.Errorf(codes.Internal, "[ControllerUnpublishVolume] GetInstanceByID failed with error %v", err) } - err = cs.Cloud.DetachVolume(instanceID, volumeID) + err = cloud.DetachVolume(instanceID, volumeID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because it does not exist", volumeID) @@ -300,7 +330,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return nil, status.Errorf(codes.Internal, "ControllerUnpublishVolume Detach Volume failed with error %v", err) } - err = cs.Cloud.WaitDiskDetached(instanceID, volumeID) + err = cloud.WaitDiskDetached(instanceID, volumeID) if err != nil { klog.Errorf("Failed to WaitDiskDetached: %v", err) if cpoerrors.IsNotFound(err) { @@ -315,6 +345,11 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return &csi.ControllerUnpublishVolumeResponse{}, nil } +type CloudsStartingToken struct { + CloudName string `json:"cloud"` + Token string `json:"token"` +} + func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { klog.V(4).Infof("ListVolumes: called with %+#v request", req) @@ -323,44 +358,116 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume } maxEntries := int(req.MaxEntries) - vlist, nextPageToken, err := cs.Cloud.ListVolumes(maxEntries, req.StartingToken) - if err != nil { - klog.Errorf("Failed to ListVolumes: %v", err) - if cpoerrors.IsInvalidError(err) { - return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: %v", err) - } - return nil, status.Errorf(codes.Internal, "ListVolumes failed with error %v", err) + var err error + var cloudsToken = CloudsStartingToken{ + CloudName: "", + Token: "", } - ventries := make([]*csi.ListVolumesResponse_Entry, 0, len(vlist)) - for _, v := range vlist { - ventry := csi.ListVolumesResponse_Entry{ - Volume: &csi.Volume{ - VolumeId: v.ID, - CapacityBytes: int64(v.Size * 1024 * 1024 * 1024), - }, + cloudsNames := maps.Keys(cs.Clouds) + sort.Strings(cloudsNames) + + currentCloudName := cloudsNames[0] + if req.StartingToken != "" { + err = json.Unmarshal([]byte(req.StartingToken), &cloudsToken) + if err != nil { + return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: Token invalid") } + currentCloudName = cloudsToken.CloudName + } - status := &csi.ListVolumesResponse_VolumeStatus{} - status.PublishedNodeIds = make([]string, 0, len(v.Attachments)) - for _, attachment := range v.Attachments { - status.PublishedNodeIds = append(status.PublishedNodeIds, attachment.ServerID) + startingToken := cloudsToken.Token + var cloudsVentries []*csi.ListVolumesResponse_Entry + var vlist []volumes.Volume + var nextPageToken string + + startIdx := 0 + for idx := 0; idx < len(cloudsNames); idx++ { + if cloudsNames[idx] == 
currentCloudName {
+			break
+		}
+		startIdx++
+	}
+	for idx := startIdx; idx < len(cloudsNames); idx++ {
+		if maxEntries > 0 {
+			vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(maxEntries-len(cloudsVentries), startingToken)
+		} else {
+			vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(maxEntries, startingToken)
 		}
-		ventry.Status = status
+		if err != nil {
+			klog.Errorf("Failed to ListVolumes: %v", err)
+			if cpoerrors.IsInvalidError(err) {
+				return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: %v", err)
+			}
+			return nil, status.Errorf(codes.Internal, "ListVolumes failed with error %v", err)
+		}
+
+		ventries := make([]*csi.ListVolumesResponse_Entry, 0, len(vlist))
+		for _, v := range vlist {
+			ventry := csi.ListVolumesResponse_Entry{
+				Volume: &csi.Volume{
+					VolumeId:      v.ID,
+					CapacityBytes: int64(v.Size * 1024 * 1024 * 1024),
+				},
+			}
-		ventries = append(ventries, &ventry)
+			status := &csi.ListVolumesResponse_VolumeStatus{}
+			status.PublishedNodeIds = make([]string, 0, len(v.Attachments))
+			for _, attachment := range v.Attachments {
+				status.PublishedNodeIds = append(status.PublishedNodeIds, attachment.ServerID)
+			}
+			ventry.Status = status
+
+			ventries = append(ventries, &ventry)
+		}
+		klog.V(4).Infof("ListVolumes: retrieved %d entries and %q next token from cloud %q", len(ventries), nextPageToken, cloudsNames[idx])
+
+		cloudsVentries = append(cloudsVentries, ventries...)
+
+		// Reached maxEntries, set up nextToken with the cloud identifier if needed
+		if maxEntries > 0 && len(cloudsVentries) == maxEntries {
+			if nextPageToken == "" {
+				if idx+1 == len(cloudsNames) {
+					// no more entries and no more clouds
+					// send no token, it's finished
+					klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(cloudsVentries), "")
+					return &csi.ListVolumesResponse{
+						Entries:   cloudsVentries,
+						NextToken: "",
+					}, nil
+				} else {
+					// stiil clouds to process
+					cloudsToken.CloudName = cloudsNames[idx+1]
+					cloudsToken.Token = ""
+				}
+			} else {
+				cloudsToken.CloudName = cloudsNames[idx]
+				cloudsToken.Token = nextPageToken
+			}
+			data, _ := json.Marshal(cloudsToken)
+			klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(cloudsVentries), string(data))
+			return &csi.ListVolumesResponse{
+				Entries:   cloudsVentries,
+				NextToken: string(data),
+			}, nil
+		}
 	}
 
-	klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(ventries), nextPageToken)
+	klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(cloudsVentries), "")
 	return &csi.ListVolumesResponse{
-		Entries:   ventries,
-		NextToken: nextPageToken,
+		Entries:   cloudsVentries,
+		NextToken: "",
 	}, nil
 }
 
 func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
 	klog.V(4).Infof("CreateSnapshot: called with args %+v", protosanitizer.StripSecrets(*req))
 
+	// Volume cloud
+	volCloud := req.GetSecrets()["cloud"]
+	cloud, cloudExist := cs.Clouds[volCloud]
+	if !cloudExist {
+		return nil, status.Error(codes.InvalidArgument, "[CreateSnapshot] specified cloud undefined")
+	}
+
 	name := req.Name
 	volumeID := req.GetSourceVolumeId()
 	snapshotType := req.Parameters[openstack.SnapshotType]
@@ -396,7 +503,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
 		return nil, status.Error(codes.InvalidArgument, "Snapshot type must be 'backup', 'snapshot' or not defined")
 	}
 	var backupsAreEnabled bool
-	backupsAreEnabled, err = cs.Cloud.BackupsAreEnabled()
+	backupsAreEnabled, err
= cloud.BackupsAreEnabled() klog.V(4).Infof("Backups enabled: %v", backupsAreEnabled) if err != nil { klog.Errorf("Failed to check if backups are enabled: %v", err) @@ -408,7 +515,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Error(codes.FailedPrecondition, "Backups are not enabled in Cinder") } // Get a list of backups with the provided name - backups, err = cs.Cloud.ListBackups(filters) + backups, err = cloud.ListBackups(filters) if err != nil { klog.Errorf("Failed to query for existing Backup during CreateSnapshot: %v", err) return nil, status.Error(codes.Internal, "Failed to get backups") @@ -422,7 +529,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS if len(backups) == 1 { // since backup.VolumeID is not part of ListBackups response // we need fetch single backup to get the full object. - backup, err = cs.Cloud.GetBackupByID(backups[0].ID) + backup, err = cloud.GetBackupByID(backups[0].ID) if err != nil { klog.Errorf("Failed to get backup by ID %s: %v", backup.ID, err) return nil, status.Error(codes.Internal, "Failed to get backup by ID") @@ -451,7 +558,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS // Create the snapshot if the backup does not already exist and wait for it to be ready if !backupAlreadyExists { - snap, err = cs.createSnapshot(name, volumeID, req.Parameters) + snap, err = cs.createSnapshot(cloud, name, volumeID, req.Parameters) if err != nil { return nil, err } @@ -461,7 +568,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS klog.Errorf("Error to convert time to timestamp: %v", err) } - snap.Status, err = cs.Cloud.WaitSnapshotReady(snap.ID) + snap.Status, err = cloud.WaitSnapshotReady(snap.ID) if err != nil { klog.Errorf("Failed to WaitSnapshotReady: %v", err) return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error: %v. Current snapshot status: %v", err, snap.Status) @@ -486,7 +593,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS if snapshotType == "backup" { if !backupAlreadyExists { - backup, err = cs.createBackup(name, volumeID, snap, req.Parameters) + backup, err = cs.createBackup(cloud, name, volumeID, snap, req.Parameters) if err != nil { return nil, err } @@ -497,20 +604,20 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS klog.Errorf("Error to convert time to timestamp: %v", err) } - backup.Status, err = cs.Cloud.WaitBackupReady(backup.ID, snapSize, backupMaxDurationSecondsPerGB) + backup.Status, err = cloud.WaitBackupReady(backup.ID, snapSize, backupMaxDurationSecondsPerGB) if err != nil { klog.Errorf("Failed to WaitBackupReady: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v. Current backups status: %s", err, backup.Status)) } // Necessary to get all the backup information, including size. 
- backup, err = cs.Cloud.GetBackupByID(backup.ID) + backup, err = cloud.GetBackupByID(backup.ID) if err != nil { klog.Errorf("Failed to GetBackupByID after backup creation: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("GetBackupByID failed with error %v", err)) } - err = cs.Cloud.DeleteSnapshot(backup.SnapshotID) + err = cloud.DeleteSnapshot(backup.SnapshotID) if err != nil && !cpoerrors.IsNotFound(err) { klog.Errorf("Failed to DeleteSnapshot: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteSnapshot failed with error %v", err)) @@ -529,13 +636,13 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS } -func (cs *controllerServer) createSnapshot(name string, volumeID string, parameters map[string]string) (snap *snapshots.Snapshot, err error) { +func (cs *controllerServer) createSnapshot(cloud openstack.IOpenStack, name string, volumeID string, parameters map[string]string) (snap *snapshots.Snapshot, err error) { filters := map[string]string{} filters["Name"] = name // List existing snapshots with the same name - snapshots, _, err := cs.Cloud.ListSnapshots(filters) + snapshots, _, err := cloud.ListSnapshots(filters) if err != nil { klog.Errorf("Failed to query for existing Snapshot during CreateSnapshot: %v", err) return nil, status.Error(codes.Internal, "Failed to get snapshots") @@ -574,7 +681,7 @@ func (cs *controllerServer) createSnapshot(name string, volumeID string, paramet } // TODO: Delegate the check to openstack itself and ignore the conflict - snap, err = cs.Cloud.CreateSnapshot(name, volumeID, properties) + snap, err = cloud.CreateSnapshot(name, volumeID, properties) if err != nil { klog.Errorf("Failed to Create snapshot: %v", err) return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) @@ -585,7 +692,7 @@ func (cs *controllerServer) createSnapshot(name string, volumeID string, paramet return snap, nil } -func (cs *controllerServer) createBackup(name string, volumeID string, snap *snapshots.Snapshot, parameters map[string]string) (*backups.Backup, error) { +func (cs *controllerServer) createBackup(cloud openstack.IOpenStack, name string, volumeID string, snap *snapshots.Snapshot, parameters map[string]string) (*backups.Backup, error) { // Add cluster ID to the snapshot metadata properties := map[string]string{cinderCSIClusterIDKey: cs.Driver.cluster} @@ -600,7 +707,7 @@ func (cs *controllerServer) createBackup(name string, volumeID string, snap *sna } } - backup, err := cs.Cloud.CreateBackup(name, volumeID, snap.ID, parameters[openstack.SnapshotAvailabilityZone], properties) + backup, err := cloud.CreateBackup(name, volumeID, snap.ID, parameters[openstack.SnapshotAvailabilityZone], properties) if err != nil { klog.Errorf("Failed to Create backup: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v", err)) @@ -613,6 +720,13 @@ func (cs *controllerServer) createBackup(name string, volumeID string, snap *sna func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { klog.V(4).Infof("DeleteSnapshot: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[DeleteSnapshot] specified cloud undefined") + } + id := req.GetSnapshotId() if id == "" { @@ -620,9 +734,9 @@ func (cs *controllerServer) 
DeleteSnapshot(ctx context.Context, req *csi.DeleteS
 	}
 
 	// If volumeSnapshot object was linked to a cinder backup, delete the backup.
-	back, err := cs.Cloud.GetBackupByID(id)
+	back, err := cloud.GetBackupByID(id)
 	if err == nil && back != nil {
-		err = cs.Cloud.DeleteBackup(id)
+		err = cloud.DeleteBackup(id)
 		if err != nil {
 			klog.Errorf("Failed to Delete backup: %v", err)
 			return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteBackup failed with error %v", err))
@@ -630,7 +744,7 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
 	}
 
 	// Delegate the check to openstack itself
-	err = cs.Cloud.DeleteSnapshot(id)
+	err = cloud.DeleteSnapshot(id)
 	if err != nil {
 		if cpoerrors.IsNotFound(err) {
 			klog.V(3).Infof("Snapshot %s is already deleted.", id)
@@ -644,9 +758,16 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
 }
 
 func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
+	// Volume cloud
+	volCloud := req.GetSecrets()["cloud"]
+	cloud, cloudExist := cs.Clouds[volCloud]
+	if !cloudExist {
+		return nil, status.Error(codes.InvalidArgument, "[ListSnapshots] specified cloud undefined")
+	}
+
 	snapshotID := req.GetSnapshotId()
 	if len(snapshotID) != 0 {
-		snap, err := cs.Cloud.GetSnapshotByID(snapshotID)
+		snap, err := cloud.GetSnapshotByID(snapshotID)
 		if err != nil {
 			if cpoerrors.IsNotFound(err) {
 				klog.V(3).Infof("Snapshot %s not found", snapshotID)
@@ -690,7 +811,7 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
 	// Only retrieve snapshots that are available
 	filters["Status"] = "available"
 
-	slist, nextPageToken, err = cs.Cloud.ListSnapshots(filters)
+	slist, nextPageToken, err = cloud.ListSnapshots(filters)
 	if err != nil {
 		klog.Errorf("Failed to ListSnapshots: %v", err)
 		return nil, status.Errorf(codes.Internal, "ListSnapshots failed with error %v", err)
@@ -732,6 +853,13 @@ func (cs *controllerServer) ControllerGetCapabilities(ctx context.Context, req *
 }
 
 func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
+	// Volume cloud
+	volCloud := req.GetSecrets()["cloud"]
+	cloud, cloudExist := cs.Clouds[volCloud]
+	if !cloudExist {
+		return nil, status.Error(codes.InvalidArgument, "[ValidateVolumeCapabilities] specified cloud undefined")
+	}
+
 	reqVolCap := req.GetVolumeCapabilities()
 
 	if len(reqVolCap) == 0 {
@@ -743,7 +871,7 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req
 		return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities Volume ID must be provided")
 	}
 
-	_, err := cs.Cloud.GetVolume(volumeID)
+	_, err := cloud.GetVolume(volumeID)
 	if err != nil {
 		if cpoerrors.IsNotFound(err) {
 			return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities Volume %s not found", volumeID)
@@ -784,12 +912,20 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, req *csi.Co
 		return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
 	}
 
-	volume, err := cs.Cloud.GetVolume(volumeID)
-	if err != nil {
-		if cpoerrors.IsNotFound(err) {
-			return nil, status.Errorf(codes.NotFound, "Volume %s not found", volumeID)
+	var volume *volumes.Volume
+	var err error
+	for _, cloud := range cs.Clouds {
+		volume, err = cloud.GetVolume(volumeID)
+		if err != nil {
+			if cpoerrors.IsNotFound(err) {
+				continue
+			}
+			return nil, status.Errorf(codes.Internal,
"ControllerGetVolume failed with error %v", err) } - return nil, status.Errorf(codes.Internal, "ControllerGetVolume failed with error %v", err) + } + if err != nil { + return nil, status.Errorf(codes.NotFound, "Volume %s not found", volumeID) } ventry := csi.ControllerGetVolumeResponse{ @@ -812,6 +947,13 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, req *csi.Co func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { klog.V(4).Infof("ControllerExpandVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ControllerExpandVolume] specified cloud undefined") + } + volumeID := req.GetVolumeId() if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") @@ -829,7 +971,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi return nil, status.Error(codes.OutOfRange, "After round-up, volume size exceeds the limit specified") } - volume, err := cs.Cloud.GetVolume(volumeID) + volume, err := cloud.GetVolume(volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Error(codes.NotFound, "Volume not found") @@ -846,14 +988,14 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi }, nil } - err = cs.Cloud.ExpandVolume(volumeID, volume.Status, volSizeGB) + err = cloud.ExpandVolume(volumeID, volume.Status, volSizeGB) if err != nil { return nil, status.Errorf(codes.Internal, "Could not resize volume %q to size %v: %v", volumeID, volSizeGB, err) } // we need wait for the volume to be available or InUse, it might be error_extending in some scenario targetStatus := []string{openstack.VolumeAvailableStatus, openstack.VolumeInUseStatus} - err = cs.Cloud.WaitVolumeTargetStatus(volumeID, targetStatus) + err = cloud.WaitVolumeTargetStatus(volumeID, targetStatus) if err != nil { klog.Errorf("Failed to WaitVolumeTargetStatus of volume %s: %v", volumeID, err) return nil, status.Errorf(codes.Internal, "[ControllerExpandVolume] Volume %s not in target state after resize operation: %v", volumeID, err) diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index f8d7bcd541..c3bdf6fd24 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cinder import ( + "encoding/json" "testing" "github.com/container-storage-interface/spec/lib/go/csi" @@ -32,11 +33,13 @@ var osmock *openstack.OpenStackMock func init() { if fakeCs == nil { osmock = new(openstack.OpenStackMock) - openstack.OsInstance = osmock + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": osmock, + } d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeCs = NewControllerServer(d, openstack.OsInstance) + fakeCs = NewControllerServer(d, openstack.OsInstances) } } @@ -411,8 +414,12 @@ func TestListVolumes(t *testing.T) { // Init assert assert := assert.New(t) - - fakeReq := &csi.ListVolumesRequest{MaxEntries: 2, StartingToken: FakeVolID} + token := CloudsStartingToken{ + CloudName: "", + Token: FakeVolID, + } + data, _ := json.Marshal(token) + fakeReq := &csi.ListVolumesRequest{MaxEntries: 2, StartingToken: string(data)} // Expected Result expectedRes := &csi.ListVolumesResponse{ diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go index bd75fbce9d..f4bb7edecd 100644 --- a/pkg/csi/cinder/driver.go +++ b/pkg/csi/cinder/driver.go @@ -172,9 +172,9 @@ func (d *Driver) GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_Access return d.vcap } -func (d *Driver) SetupControllerService(cloud openstack.IOpenStack) { +func (d *Driver) SetupControllerService(clouds map[string]openstack.IOpenStack) { klog.Info("Providing controller service") - d.cs = NewControllerServer(d, cloud) + d.cs = NewControllerServer(d, clouds) } func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata) { diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go index dbfeb6144b..2e25191519 100644 --- a/pkg/csi/cinder/nodeserver_test.go +++ b/pkg/csi/cinder/nodeserver_test.go @@ -48,9 +48,11 @@ func init() { metadata.MetadataService = metamock omock = new(openstack.OpenStackMock) - openstack.OsInstance = omock + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": omock, + } - fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) + fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""]) } } @@ -140,10 +142,12 @@ func TestNodePublishVolumeEphermeral(t *testing.T) { mount.MInstance = mmock metadata.MetadataService = metamock - openstack.OsInstance = omock + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": omock, + } d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) + fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""]) // Init assert assert := assert.New(t) @@ -283,7 +287,9 @@ func TestNodeUnpublishVolume(t *testing.T) { func TestNodeUnpublishVolumeEphermeral(t *testing.T) { mount.MInstance = mmock metadata.MetadataService = metamock - openstack.OsInstance = omock + osmock := map[string]openstack.IOpenStack{ + "": new(openstack.OpenStackMock), + } fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID) mmock.On("UnmountPath", FakeTargetPath).Return(nil) @@ -293,7 +299,7 @@ func TestNodeUnpublishVolumeEphermeral(t *testing.T) { omock.On("DeleteVolume", FakeVolID).Return(nil) d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) + fakeNse := NewNodeServer(d, mount.MInstance, 
metadata.MetadataService, osmock[""]) // Init assert assert := assert.New(t) diff --git a/pkg/csi/cinder/openstack/fixtures/clouds.yaml b/pkg/csi/cinder/openstack/fixtures/clouds.yaml index f3e7a6367c..e42bf0927d 100644 --- a/pkg/csi/cinder/openstack/fixtures/clouds.yaml +++ b/pkg/csi/cinder/openstack/fixtures/clouds.yaml @@ -10,3 +10,25 @@ clouds: cacert: "fake-ca.crt" interface: "public" identity_api_version: 3 + openstack_cloud2: + auth: + auth_url: "https://169.254.169.254/identity/v3" + username: "user" + password: "pass" + project_id: "c869168a828847f39f7f06edd7305637" + domain_id: "2a73b8f597c04551a0fdc8e95544be8a" + region_name: "RegionTwo" + cacert: "fake-ca.crt" + interface: "public" + identity_api_version: 3 + openstack_cloud3: + auth: + auth_url: "https://961.452.961.452/identity/v3" + username: "user_cloud3" + password: "pass_cloud3" + project_id: "66c684738f74161ad8b41cb56224b311" + domain_id: "032da590a2714eda744bd321b5356c7e" + region_name: "AnotherRegion" + cacert: "fake-ca_cloud3.crt" + interface: "public" + identity_api_version: 3 diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go index c4e98b9063..cbd1c91fca 100644 --- a/pkg/csi/cinder/openstack/openstack.go +++ b/pkg/csi/cinder/openstack/openstack.go @@ -90,13 +90,16 @@ type BlockStorageOpts struct { } type Config struct { - Global client.AuthOpts + Global map[string]*client.AuthOpts Metadata metadata.Opts BlockStorage BlockStorageOpts } func logcfg(cfg Config) { - client.LogCfg(cfg.Global) + for cloudName, global := range cfg.Global { + klog.V(0).Infof("Global: \"%s\"", cloudName) + client.LogCfg(*global) + } klog.Infof("Block storage opts: %v", cfg.BlockStorage) } @@ -121,16 +124,18 @@ func GetConfigFromFiles(configFilePaths []string) (Config, error) { } } - // Update the config with data from clouds.yaml if UseClouds is enabled - if cfg.Global.UseClouds { - if cfg.Global.CloudsFile != "" { - os.Setenv("OS_CLIENT_CONFIG_FILE", cfg.Global.CloudsFile) - } - err := client.ReadClouds(&cfg.Global) - if err != nil { - return cfg, err + for _, global := range cfg.Global { + // Update the config with data from clouds.yaml if UseClouds is enabled + if global.UseClouds { + if global.CloudsFile != "" { + os.Setenv("OS_CLIENT_CONFIG_FILE", global.CloudsFile) + } + err := client.ReadClouds(global) + if err != nil { + return cfg, err + } + klog.V(5).Infof("Credentials are loaded from %s:", global.CloudsFile) } - klog.V(5).Infof("Credentials are loaded from %s:", cfg.Global.CloudsFile) } return cfg, nil @@ -138,10 +143,11 @@ func GetConfigFromFiles(configFilePaths []string) (Config, error) { const defaultMaxVolAttachLimit int64 = 256 -var OsInstance IOpenStack +var OsInstances map[string]IOpenStack var configFiles = []string{"/etc/cloud.conf"} func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) { + OsInstances = make(map[string]IOpenStack) metrics.RegisterMetrics("cinder-csi") if httpEndpoint != "" { mux := http.NewServeMux() @@ -159,8 +165,8 @@ func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) { klog.V(2).Infof("InitOpenStackProvider configFiles: %s", configFiles) } -// CreateOpenStackProvider creates Openstack Instance -func CreateOpenStackProvider() (IOpenStack, error) { +// CreateOpenStackProvider creates Openstack Instance with custom Global config param +func CreateOpenStackProvider(cloudName string) (IOpenStack, error) { // Get config from file cfg, err := GetConfigFromFiles(configFiles) if err != nil { @@ -168,15 +174,19 @@ func 
CreateOpenStackProvider() (IOpenStack, error) { return nil, err } logcfg(cfg) + _, cloudNameDefined := cfg.Global[cloudName] + if !cloudNameDefined { + return nil, fmt.Errorf("GetConfigFromFiles cloud name \"%s\" not found in configuration files: %s", cloudName, configFiles) + } - provider, err := client.NewOpenStackClient(&cfg.Global, "cinder-csi-plugin", userAgentData...) + provider, err := client.NewOpenStackClient(cfg.Global[cloudName], "cinder-csi-plugin", userAgentData...) if err != nil { return nil, err } epOpts := gophercloud.EndpointOpts{ - Region: cfg.Global.Region, - Availability: cfg.Global.EndpointType, + Region: cfg.Global[cloudName].Region, + Availability: cfg.Global[cloudName].EndpointType, } // Init Nova ServiceClient @@ -197,7 +207,7 @@ func CreateOpenStackProvider() (IOpenStack, error) { } // Init OpenStack - OsInstance = &OpenStack{ + OsInstances[cloudName] = &OpenStack{ compute: computeclient, blockstorage: blockstorageclient, bsOpts: cfg.BlockStorage, @@ -205,16 +215,17 @@ func CreateOpenStackProvider() (IOpenStack, error) { metadataOpts: cfg.Metadata, } - return OsInstance, nil + return OsInstances[cloudName], nil } // GetOpenStackProvider returns Openstack Instance -func GetOpenStackProvider() (IOpenStack, error) { - if OsInstance != nil { +func GetOpenStackProvider(cloudName string) (IOpenStack, error) { + OsInstance, OsInstanceDefined := OsInstances[cloudName] + if OsInstanceDefined { return OsInstance, nil } var err error - OsInstance, err = CreateOpenStackProvider() + OsInstance, err = CreateOpenStackProvider(cloudName) if err != nil { return nil, err } diff --git a/pkg/csi/cinder/openstack/openstack_test.go b/pkg/csi/cinder/openstack/openstack_test.go index f41eac2764..647787e9bb 100644 --- a/pkg/csi/cinder/openstack/openstack_test.go +++ b/pkg/csi/cinder/openstack/openstack_test.go @@ -24,6 +24,7 @@ import ( "github.com/gophercloud/gophercloud/v2" "github.com/spf13/pflag" "github.com/stretchr/testify/assert" + "k8s.io/cloud-provider-openstack/pkg/client" ) var fakeFileName = "cloud.conf" @@ -37,6 +38,24 @@ var fakeRegion = "RegionOne" var fakeCAfile = "fake-ca.crt" var fakeCloudName = "openstack" +var fakeUserName_cloud2 = "user" +var fakePassword_cloud2 = "pass" +var fakeAuthURL_cloud2 = "https://169.254.169.254/identity/v3" +var fakeTenantID_cloud2 = "c869168a828847f39f7f06edd7305637" +var fakeDomainID_cloud2 = "2a73b8f597c04551a0fdc8e95544be8a" +var fakeRegion_cloud2 = "RegionTwo" +var fakeCAfile_cloud2 = "fake-ca.crt" +var fakeCloudName_cloud2 = "openstack_cloud2" + +var fakeUserName_cloud3 = "user_cloud3" +var fakePassword_cloud3 = "pass_cloud3" +var fakeAuthURL_cloud3 = "https://961.452.961.452/identity/v3" +var fakeTenantID_cloud3 = "66c684738f74161ad8b41cb56224b311" +var fakeDomainID_cloud3 = "032da590a2714eda744bd321b5356c7e" +var fakeRegion_cloud3 = "AnotherRegion" +var fakeCAfile_cloud3 = "fake-ca_cloud3.crt" +var fakeCloudName_cloud3 = "openstack_cloud3" + // Test GetConfigFromFiles func TestGetConfigFromFiles(t *testing.T) { // init file @@ -49,6 +68,22 @@ tenant-id=` + fakeTenantID + ` domain-id=` + fakeDomainID + ` ca-file=` + fakeCAfile + ` region=` + fakeRegion + ` +[Global "cloud2"] +username=` + fakeUserName_cloud2 + ` +password=` + fakePassword_cloud2 + ` +auth-url=` + fakeAuthURL_cloud2 + ` +tenant-id=` + fakeTenantID_cloud2 + ` +domain-id=` + fakeDomainID_cloud2 + ` +ca-file=` + fakeCAfile_cloud2 + ` +region=` + fakeRegion_cloud2 + ` +[Global "cloud3"] +username=` + fakeUserName_cloud3 + ` +password=` + fakePassword_cloud3 + ` +auth-url=` 
+ fakeAuthURL_cloud3 + ` +tenant-id=` + fakeTenantID_cloud3 + ` +domain-id=` + fakeDomainID_cloud3 + ` +ca-file=` + fakeCAfile_cloud3 + ` +region=` + fakeRegion_cloud3 + ` [BlockStorage] rescan-on-resize=true` @@ -67,13 +102,36 @@ rescan-on-resize=true` // Init assert assert := assert.New(t) expectedOpts := Config{} - expectedOpts.Global.Username = fakeUserName - expectedOpts.Global.Password = fakePassword - expectedOpts.Global.DomainID = fakeDomainID - expectedOpts.Global.AuthURL = fakeAuthURL - expectedOpts.Global.CAFile = fakeCAfile - expectedOpts.Global.TenantID = fakeTenantID - expectedOpts.Global.Region = fakeRegion + expectedOpts.Global = make(map[string]*client.AuthOpts, 3) + + expectedOpts.Global[""] = &client.AuthOpts{ + Username: fakeUserName, + Password: fakePassword, + DomainID: fakeDomainID, + AuthURL: fakeAuthURL, + CAFile: fakeCAfile, + TenantID: fakeTenantID, + Region: fakeRegion, + } + expectedOpts.Global["cloud2"] = &client.AuthOpts{ + Username: fakeUserName_cloud2, + Password: fakePassword_cloud2, + DomainID: fakeDomainID_cloud2, + AuthURL: fakeAuthURL_cloud2, + CAFile: fakeCAfile_cloud2, + TenantID: fakeTenantID_cloud2, + Region: fakeRegion_cloud2, + } + expectedOpts.Global["cloud3"] = &client.AuthOpts{ + Username: fakeUserName_cloud3, + Password: fakePassword_cloud3, + DomainID: fakeDomainID_cloud3, + AuthURL: fakeAuthURL_cloud3, + CAFile: fakeCAfile_cloud3, + TenantID: fakeTenantID_cloud3, + Region: fakeRegion_cloud3, + } + expectedOpts.BlockStorage.RescanOnResize = true // Invoke GetConfigFromFiles @@ -130,6 +188,14 @@ func TestGetConfigFromFileWithUseClouds(t *testing.T) { use-clouds = true clouds-file = ` + wd + `/fixtures/clouds.yaml cloud = ` + fakeCloudName + ` +[Global "cloud2"] +use-clouds = true +clouds-file = ` + wd + `/fixtures/clouds.yaml +cloud = ` + fakeCloudName_cloud2 + ` +[Global "cloud3"] +use-clouds = true +clouds-file = ` + wd + `/fixtures/clouds.yaml +cloud = ` + fakeCloudName_cloud3 + ` [BlockStorage] rescan-on-resize=true` @@ -148,17 +214,48 @@ rescan-on-resize=true` // Init assert assert := assert.New(t) expectedOpts := Config{} - expectedOpts.Global.Username = fakeUserName - expectedOpts.Global.Password = fakePassword - expectedOpts.Global.DomainID = fakeDomainID - expectedOpts.Global.AuthURL = fakeAuthURL - expectedOpts.Global.CAFile = fakeCAfile - expectedOpts.Global.TenantID = fakeTenantID - expectedOpts.Global.Region = fakeRegion - expectedOpts.Global.EndpointType = gophercloud.AvailabilityPublic - expectedOpts.Global.UseClouds = true - expectedOpts.Global.CloudsFile = wd + "/fixtures/clouds.yaml" - expectedOpts.Global.Cloud = fakeCloudName + expectedOpts.Global = make(map[string]*client.AuthOpts, 3) + + expectedOpts.Global[""] = &client.AuthOpts{ + Username: fakeUserName, + Password: fakePassword, + DomainID: fakeDomainID, + AuthURL: fakeAuthURL, + CAFile: fakeCAfile, + TenantID: fakeTenantID, + Region: fakeRegion, + EndpointType: gophercloud.AvailabilityPublic, + UseClouds: true, + CloudsFile: wd + "/fixtures/clouds.yaml", + Cloud: fakeCloudName, + } + expectedOpts.Global["cloud2"] = &client.AuthOpts{ + Username: fakeUserName_cloud2, + Password: fakePassword_cloud2, + DomainID: fakeDomainID_cloud2, + AuthURL: fakeAuthURL_cloud2, + CAFile: fakeCAfile_cloud2, + TenantID: fakeTenantID_cloud2, + Region: fakeRegion_cloud2, + EndpointType: gophercloud.AvailabilityPublic, + UseClouds: true, + CloudsFile: wd + "/fixtures/clouds.yaml", + Cloud: fakeCloudName_cloud2, + } + expectedOpts.Global["cloud3"] = &client.AuthOpts{ + Username: 
fakeUserName_cloud3,
+		Password:     fakePassword_cloud3,
+		DomainID:     fakeDomainID_cloud3,
+		AuthURL:      fakeAuthURL_cloud3,
+		CAFile:       fakeCAfile_cloud3,
+		TenantID:     fakeTenantID_cloud3,
+		Region:       fakeRegion_cloud3,
+		EndpointType: gophercloud.AvailabilityPublic,
+		UseClouds:    true,
+		CloudsFile:   wd + "/fixtures/clouds.yaml",
+		Cloud:        fakeCloudName_cloud3,
+	}
+
 	expectedOpts.BlockStorage.RescanOnResize = true
 
 	// Invoke GetConfigFromFiles
diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go
index c4ecb6ffe9..627d6a6bc9 100644
--- a/pkg/csi/cinder/utils.go
+++ b/pkg/csi/cinder/utils.go
@@ -44,10 +44,10 @@ func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *c
 }
 
 //revive:disable:unexported-return
-func NewControllerServer(d *Driver, cloud openstack.IOpenStack) *controllerServer {
+func NewControllerServer(d *Driver, clouds map[string]openstack.IOpenStack) *controllerServer {
 	return &controllerServer{
 		Driver: d,
-		Cloud:  cloud,
+		Clouds: clouds,
 	}
 }
 
diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go
index 4e3a2abd65..aba0d90de0 100644
--- a/tests/sanity/cinder/sanity_test.go
+++ b/tests/sanity/cinder/sanity_test.go
@@ -22,12 +22,14 @@ func TestDriver(t *testing.T) {
 	d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
 
 	fakecloudprovider := getfakecloud()
-	openstack.OsInstance = fakecloudprovider
+	openstack.OsInstances = map[string]openstack.IOpenStack{
+		"": fakecloudprovider,
+	}
 
 	fakemnt := GetFakeMountProvider()
 	fakemet := &fakemetadata{}
 
-	d.SetupControllerService(fakecloudprovider)
+	d.SetupControllerService(openstack.OsInstances)
 	d.SetupNodeService(fakecloudprovider, fakemnt, fakemet)
 
 	// TODO: Stop call

From 29536a2b6be21eb7522d8c4cd0d66a5173888ee2 Mon Sep 17 00:00:00 2001
From: MatthieuFin
Date: Mon, 26 Feb 2024 15:29:33 +0100
Subject: [PATCH 2/7] feat(cinder-csi-plugin): add possibility to set custom topology keys (#2035)

* feat(cinder-csi-plugin-node): Additional topology keys `--additionnal-topology` are announced on NodeGetInfo to create a proper CSINode object and can be used in the storage class allowedTopologies field; in combination with csi-provisioner `--feature-gates Topology=true`, created PVs will have nodeAffinity set with the topologies present in the storageClass allowedTopologies and in the `--additionnal-topology` argument.

Signed-off-by: MatthieuFin
---
 cmd/cinder-csi-plugin/main.go | 4 +++-
 pkg/csi/cinder/driver.go | 4 ++--
 pkg/csi/cinder/nodeserver.go | 16 +++++++++++-----
 pkg/csi/cinder/nodeserver_test.go | 6 +++---
 pkg/csi/cinder/utils.go | 11 ++++++-----
 tests/sanity/cinder/sanity_test.go | 2 +-
 6 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go
index 537fdb940e..c3b031547a 100644
--- a/cmd/cinder-csi-plugin/main.go
+++ b/cmd/cinder-csi-plugin/main.go
@@ -35,6 +35,7 @@ var (
 	nodeID string
 	cloudConfig []string
 	cloudNames []string
+	additionalTopologies map[string]string
 	cluster string
 	httpEndpoint string
 	provideControllerService bool
@@ -67,6 +68,7 @@ func main() {
 	}
 
 	cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "CSI driver cloud name in config files. This option can be given multiple times to manage multiple openstack clouds")
+	cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additionnal-topology", map[string]string{}, "CSI driver topology example topology.kubernetes.io/region=REGION1.
This option can be given multiple times to manage multiple topology keys") cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.") cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, will listen (example: `:8080`). The default is empty string, which means the server is disabled.") @@ -106,7 +108,7 @@ func handle() { //Initialize Metadata metadata := metadata.GetMetadataProvider(clouds[cloudNames[0]].GetMetadataOpts().SearchOrder) - d.SetupNodeService(clouds[cloudNames[0]], mount, metadata) + d.SetupNodeService(clouds[cloudNames[0]], mount, metadata, additionalTopologies) } d.Run() diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go index f4bb7edecd..773b98a8cc 100644 --- a/pkg/csi/cinder/driver.go +++ b/pkg/csi/cinder/driver.go @@ -177,9 +177,9 @@ func (d *Driver) SetupControllerService(clouds map[string]openstack.IOpenStack) d.cs = NewControllerServer(d, clouds) } -func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata) { +func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata, topologies map[string]string) { klog.Info("Providing node service") - d.ns = NewNodeServer(d, mount, metadata, cloud) + d.ns = NewNodeServer(d, mount, metadata, cloud, topologies) } func (d *Driver) Run() { diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go index f29049e6a1..c2407c5a70 100644 --- a/pkg/csi/cinder/nodeserver.go +++ b/pkg/csi/cinder/nodeserver.go @@ -41,10 +41,11 @@ import ( ) type nodeServer struct { - Driver *Driver - Mount mount.IMount - Metadata metadata.IMetadata - Cloud openstack.IOpenStack + Driver *Driver + Mount mount.IMount + Metadata metadata.IMetadata + Cloud openstack.IOpenStack + Topologies map[string]string } func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { @@ -486,7 +487,12 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque if err != nil { return nil, status.Errorf(codes.Internal, "[NodeGetInfo] Unable to retrieve availability zone of node %v", err) } - topology := &csi.Topology{Segments: map[string]string{topologyKey: zone}} + topologyMap := make(map[string]string, len(ns.Topologies)+1) + topologyMap[topologyKey] = zone + for k, v := range ns.Topologies { + topologyMap[k] = v + } + topology := &csi.Topology{Segments: topologyMap} maxVolume := ns.Cloud.GetMaxVolLimit() diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go index 2e25191519..cb63c3504f 100644 --- a/pkg/csi/cinder/nodeserver_test.go +++ b/pkg/csi/cinder/nodeserver_test.go @@ -52,7 +52,7 @@ func init() { "": omock, } - fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""]) + fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""], map[string]string{}) } } @@ -147,7 +147,7 @@ func TestNodePublishVolumeEphermeral(t *testing.T) { } d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""]) + fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""], map[string]string{}) // Init assert assert := assert.New(t) @@ -299,7 +299,7 @@ func 
TestNodeUnpublishVolumeEphermeral(t *testing.T) { omock.On("DeleteVolume", FakeVolID).Return(nil) d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, osmock[""]) + fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, osmock[""], map[string]string{}) // Init assert assert := assert.New(t) diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go index 627d6a6bc9..13c1ee993a 100644 --- a/pkg/csi/cinder/utils.go +++ b/pkg/csi/cinder/utils.go @@ -57,12 +57,13 @@ func NewIdentityServer(d *Driver) *identityServer { } } -func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, cloud openstack.IOpenStack) *nodeServer { +func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, cloud openstack.IOpenStack, topologies map[string]string) *nodeServer { return &nodeServer{ - Driver: d, - Mount: mount, - Metadata: metadata, - Cloud: cloud, + Driver: d, + Mount: mount, + Metadata: metadata, + Cloud: cloud, + Topologies: topologies, } } diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go index aba0d90de0..3eb723ddfa 100644 --- a/tests/sanity/cinder/sanity_test.go +++ b/tests/sanity/cinder/sanity_test.go @@ -30,7 +30,7 @@ func TestDriver(t *testing.T) { fakemet := &fakemetadata{} d.SetupControllerService(openstack.OsInstances) - d.SetupNodeService(fakecloudprovider, fakemnt, fakemet) + d.SetupNodeService(fakecloudprovider, fakemnt, fakemet, map[string]string{}) // TODO: Stop call From f9ab5b39075fb2be34dda32e299970b1ecebe65f Mon Sep 17 00:00:00 2001 From: MatthieuFin Date: Wed, 17 Apr 2024 08:52:38 +0200 Subject: [PATCH 3/7] =?UTF-8?q?refactor:=20=F0=9F=92=A1=20rephrase=20comma?= =?UTF-8?q?nd=20flags=20descriptions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: MatthieuFin --- cmd/cinder-csi-plugin/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go index c3b031547a..5bb50621ca 100644 --- a/cmd/cinder-csi-plugin/main.go +++ b/cmd/cinder-csi-plugin/main.go @@ -67,8 +67,8 @@ func main() { klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err) } - cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "CSI driver cloud name in config files. This option can be given multiple times to manage multiple openstack clouds") - cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additionnal-topology", map[string]string{}, "CSI driver topology example topology.kubernetes.io/region=REGION1. This option can be given multiple times to manage multiple topology keys") + cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.") + cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. 
This option can be specified multiple times to add multiple additional topology keys.") cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.") cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, will listen (example: `:8080`). The default is empty string, which means the server is disabled.") From 62c71e3c4b1b4cb6230ab529345d57901c27708a Mon Sep 17 00:00:00 2001 From: MatthieuFin Date: Wed, 17 Apr 2024 09:55:19 +0200 Subject: [PATCH 4/7] =?UTF-8?q?refactor:=20=F0=9F=92=A1=20remove=20useless?= =?UTF-8?q?=20declaration?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/csi/cinder/openstack/openstack.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go index cbd1c91fca..dbf10dd11d 100644 --- a/pkg/csi/cinder/openstack/openstack.go +++ b/pkg/csi/cinder/openstack/openstack.go @@ -224,8 +224,7 @@ func GetOpenStackProvider(cloudName string) (IOpenStack, error) { if OsInstanceDefined { return OsInstance, nil } - var err error - OsInstance, err = CreateOpenStackProvider(cloudName) + OsInstance, err := CreateOpenStackProvider(cloudName) if err != nil { return nil, err } From b3b83bfaec054bc30bd15e0cfc7d6d8d51cbd94d Mon Sep 17 00:00:00 2001 From: MatthieuFin Date: Fri, 19 Apr 2024 00:04:28 +0200 Subject: [PATCH 5/7] =?UTF-8?q?refactor:=20=F0=9F=92=A1=20loop=20with=20ra?= =?UTF-8?q?nge=20instead=20of=20C=20like?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/csi/cinder/controllerserver.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index 86d5066520..b3675d4210 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -382,8 +382,8 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume var nextPageToken string startIdx := 0 - for idx := 0; idx < len(cloudsNames); idx++ { - if cloudsNames[idx] == currentCloudName { + for _, cloudName := range cloudsNames { + if cloudName == currentCloudName { break } startIdx++ @@ -436,7 +436,7 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume NextToken: "", }, nil } else { - // stiil clouds to process + // still clouds to process cloudsToken.CloudName = cloudsNames[idx+1] } } From 89383ee830ecbb854bcf92d22c4c6298ac0d959f Mon Sep 17 00:00:00 2001 From: MatthieuFin Date: Tue, 14 May 2024 14:34:19 +0200 Subject: [PATCH 6/7] docs(cinder-csi-plugin): multi cloud configuration add documentation and examples about multi cloud support configuration Co-authored-by: sebastienmusso Co-authored-by: FlorentLaloup Signed-off-by: MatthieuFin --- docs/cinder-csi-plugin/multi-region-clouds.md | 316 ++++++++++++++++++ 1 file changed, 316 insertions(+) create mode 100644 docs/cinder-csi-plugin/multi-region-clouds.md diff --git a/docs/cinder-csi-plugin/multi-region-clouds.md b/docs/cinder-csi-plugin/multi-region-clouds.md new file mode 100644 index 0000000000..565b53446b --- /dev/null +++ b/docs/cinder-csi-plugin/multi-region-clouds.md @@ -0,0 +1,316 @@ +# Multi Az/Region/Openstack Configuration + +### Multi cluster Configuration file + +Create a configuration file with a subsection per openstack cluster to manage (pay attention to 
+
+### Create region/cloud secrets
+
+Create one secret per OpenStack cloud, containing a single key `cloud` whose value is the name of the corresponding configuration file subsection.
+
+These secrets are referenced in StorageClass definitions to identify the OpenStack cloud associated with each StorageClass.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: openstack-config-region-one
+  namespace: kube-system
+type: Opaque
+stringData:
+  cloud: region-one
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: openstack-config-region-two
+  namespace: kube-system
+type: Opaque
+stringData:
+  cloud: region-two
+```
+
+### Create a storage class per cloud
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+  name: sc-region-one
+allowVolumeExpansion: true
+allowedTopologies:
+- matchLabelExpressions:
+  - key: topology.cinder.csi.openstack.org/zone
+    values:
+    - nova
+  - key: topology.kubernetes.io/region
+    values:
+    - region-one
+parameters:
+  csi.storage.k8s.io/controller-publish-secret-name: openstack-config-region-one
+  csi.storage.k8s.io/controller-publish-secret-namespace: kube-system
+  csi.storage.k8s.io/node-publish-secret-name: openstack-config-region-one
+  csi.storage.k8s.io/node-publish-secret-namespace: kube-system
+  csi.storage.k8s.io/node-stage-secret-name: openstack-config-region-one
+  csi.storage.k8s.io/node-stage-secret-namespace: kube-system
+  csi.storage.k8s.io/provisioner-secret-name: openstack-config-region-one
+  csi.storage.k8s.io/provisioner-secret-namespace: kube-system
+provisioner: cinder.csi.openstack.org
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: sc-region-two
+allowVolumeExpansion: true
+allowedTopologies:
+- matchLabelExpressions:
+  - key: topology.cinder.csi.openstack.org/zone
+    values:
+    - nova
+  - key: topology.kubernetes.io/region
+    values:
+    - region-two
+parameters:
+  csi.storage.k8s.io/controller-publish-secret-name: openstack-config-region-two
+  csi.storage.k8s.io/controller-publish-secret-namespace: kube-system
+  csi.storage.k8s.io/node-publish-secret-name: openstack-config-region-two
+  csi.storage.k8s.io/node-publish-secret-namespace: kube-system
+  csi.storage.k8s.io/node-stage-secret-name: openstack-config-region-two
+  csi.storage.k8s.io/node-stage-secret-namespace: kube-system
+  csi.storage.k8s.io/provisioner-secret-name: openstack-config-region-two
+  csi.storage.k8s.io/provisioner-secret-namespace: kube-system
+provisioner: cinder.csi.openstack.org
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
+```
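+
+For illustration, a PersistentVolumeClaim that provisions its volume on the `region-two` cloud only needs to reference the matching storage class (the claim name and size below are arbitrary):
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-region-two   # illustrative name
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi        # illustrative size
+  storageClassName: sc-region-two
+```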
+
+### Create a csi-cinder-nodeplugin daemonset per OpenStack cloud
+
+Each daemonset must schedule its pods onto the nodes that belong to its OpenStack cloud. This assumes every node carries a `topology.kubernetes.io/region` label whose value is the corresponding cloud name (the label can be managed with kubespray, manually, or by any other mechanism; having the openstack-cloud-controller-manager set it would be a welcome improvement).
+
+Do as follows:
+- Use a `nodeSelector` to match the proper node labels.
+- Add the CLI argument `--additional-topology topology.kubernetes.io/region=region-one`, which must match the node labels, to the `cinder-csi-plugin` container.
+- Add the CLI argument `--cloud-name="region-one"`, which must match the configuration file subsection name, to the `cinder-csi-plugin` container.
+
+```yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: csi-cinder-nodeplugin-region-one
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: csi-cinder-nodeplugin-region-one
+  template:
+    metadata:
+      labels:
+        app: csi-cinder-nodeplugin-region-one
+    spec:
+      containers:
+        - name: node-driver-registrar
+          ...
+        - name: liveness-probe
+          ...
+        - name: cinder-csi-plugin
+          image: docker.io/k8scloudprovider/cinder-csi-plugin:v1.31.0
+          args:
+            - /bin/cinder-csi-plugin
+            - --endpoint=$(CSI_ENDPOINT)
+            - --cloud-config=$(CLOUD_CONFIG)
+            - --cloud-name="region-one"
+            - --additional-topology
+            - topology.kubernetes.io/region=region-one
+          env:
+            - name: CSI_ENDPOINT
+              value: unix://csi/csi.sock
+            - name: CLOUD_CONFIG
+              value: /etc/config/cloud.conf
+          ...
+          volumeMounts:
+            ...
+            - mountPath: /etc/config
+              name: secret-cinderplugin
+              readOnly: true
+            ...
+      nodeSelector:
+        topology.kubernetes.io/region: region-one
+      volumes:
+        ...
+        - name: secret-cinderplugin
+          secret:
+            defaultMode: 420
+            secretName: cloud-config
+        ...
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: csi-cinder-nodeplugin-region-two
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: csi-cinder-nodeplugin-region-two
+  template:
+    metadata:
+      labels:
+        app: csi-cinder-nodeplugin-region-two
+    spec:
+      containers:
+        - name: node-driver-registrar
+          ...
+        - name: liveness-probe
+          ...
+        - name: cinder-csi-plugin
+          image: docker.io/k8scloudprovider/cinder-csi-plugin:v1.31.0
+          args:
+            - /bin/cinder-csi-plugin
+            - --endpoint=$(CSI_ENDPOINT)
+            - --cloud-config=$(CLOUD_CONFIG)
+            - --cloud-name="region-two"
+            - --additional-topology
+            - topology.kubernetes.io/region=region-two
+          env:
+            - name: CSI_ENDPOINT
+              value: unix://csi/csi.sock
+            - name: CLOUD_CONFIG
+              value: /etc/config/cloud.conf
+          ...
+          volumeMounts:
+            ...
+            - mountPath: /etc/config
+              name: secret-cinderplugin
+              readOnly: true
+            ...
+      nodeSelector:
+        topology.kubernetes.io/region: region-two
+      volumes:
+        ...
+        - name: secret-cinderplugin
+          secret:
+            defaultMode: 420
+            secretName: cloud-config
+        ...
+```
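+
+Before rolling out the daemonsets, you can check that every node carries the expected label, for example with `kubectl get nodes --label-columns topology.kubernetes.io/region`.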
+
+### Configure csi-cinder-controllerplugin deployment
+
+Enable the `Topology` feature gate on the `csi-provisioner` container of the csi-cinder-controllerplugin deployment by adding the CLI argument `--feature-gates=Topology=true`.
+
+Add one `--cloud-name="<name>"` CLI argument per managed OpenStack cloud to the `cinder-csi-plugin` container; each name must match a configuration file subsection name.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+  name: csi-cinder-controllerplugin
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: csi-cinder-controllerplugin
+  template:
+    metadata:
+      labels:
+        app: csi-cinder-controllerplugin
+    spec:
+      containers:
+        - name: csi-provisioner
+          image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
+          args:
+            - --csi-address=$(ADDRESS)
+            - --timeout=3m
+            - --default-fstype=ext4
+            - --extra-create-metadata
+            - --feature-gates
+            - Topology=true
+          ...
+        - name: cinder-csi-plugin
+          image: docker.io/k8scloudprovider/cinder-csi-plugin:v1.31.0
+          args:
+            - /bin/cinder-csi-plugin
+            - --endpoint=$(CSI_ENDPOINT)
+            - --cloud-config=$(CLOUD_CONFIG)
+            - --cluster=$(CLUSTER_NAME)
+            - --cloud-name="region-one"
+            - --cloud-name="region-two"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix://csi/csi.sock
+            - name: CLOUD_CONFIG
+              value: /etc/config/cloud.conf
+            - name: CLUSTER_NAME
+              value: kubernetes
+          volumeMounts:
+            - mountPath: /etc/config
+              name: secret-cinderplugin
+              readOnly: true
+          ...
+        - name: csi-attacher
+          ...
+        - name: csi-snapshotter
+          ...
+        - name: csi-resizer
+          ...
+        - name: liveness-probe
+          ...
+      volumes:
+        - name: secret-cinderplugin
+          secret:
+            defaultMode: 420
+            secretName: cloud-config
+      ...
+```

From b0e5a1cc1731a85d18228fd243f39f19f0588de4 Mon Sep 17 00:00:00 2001
From: MatthieuFin
Date: Sun, 16 Jun 2024 22:05:02 +0200
Subject: [PATCH 7/7] fix(cinder-csi-plugin): controllerServer ListVolumes
 with `--max-entries` arg

Add unit test cases for the ListVolumes function in a multi-cloud
configuration, with and without the `--max-entries` arg.

Signed-off-by: MatthieuFin
---
 pkg/csi/cinder/controllerserver.go      |  86 ++-
 pkg/csi/cinder/controllerserver_test.go | 908 +++++++++++++++++++++++-
 2 files changed, 948 insertions(+), 46 deletions(-)
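
The NextToken that spans several clouds is the plain JSON form of CloudsStartingToken, as the StartingToken literals in the tests below show (the unexported isEmpty field is not marshaled). A minimal, self-contained sketch of the round-trip, using a local stand-in for the plugin's type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for the plugin's CloudsStartingToken.
type cloudsStartingToken struct {
	CloudName string `json:"cloud"`
	Token     string `json:"token"`
}

func main() {
	// Encoding: what ListVolumes hands back as NextToken.
	next, err := json.Marshal(cloudsStartingToken{CloudName: "region-x", Token: "vol4"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(next)) // {"cloud":"region-x","token":"vol4"}

	// Decoding: how the next call recovers which cloud and page to resume from.
	var st cloudsStartingToken
	if err := json.Unmarshal(next, &st); err != nil {
		panic(err)
	}
	fmt.Println(st.CloudName, st.Token) // region-x vol4
}
```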
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index b3675d4210..595b34fb7d 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -348,6 +348,31 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *
 type CloudsStartingToken struct {
 	CloudName string `json:"cloud"`
 	Token     string `json:"token"`
+	isEmpty   bool
+}
+
+func (cs *controllerServer) extractNodeIDs(attachments []volumes.Attachment) []string {
+	nodeIDs := make([]string, len(attachments))
+	for i, attachment := range attachments {
+		nodeIDs[i] = attachment.ServerID
+	}
+	return nodeIDs
+}
+
+func (cs *controllerServer) createVolumeEntries(vlist []volumes.Volume) []*csi.ListVolumesResponse_Entry {
+	entries := make([]*csi.ListVolumesResponse_Entry, len(vlist))
+	for i, v := range vlist {
+		entries[i] = &csi.ListVolumesResponse_Entry{
+			Volume: &csi.Volume{
+				VolumeId:      v.ID,
+				CapacityBytes: int64(v.Size * 1024 * 1024 * 1024),
+			},
+			Status: &csi.ListVolumesResponse_VolumeStatus{
+				PublishedNodeIds: cs.extractNodeIDs(v.Attachments),
+			},
+		}
+	}
+	return entries
 }
 
@@ -362,6 +387,7 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
 	var cloudsToken = CloudsStartingToken{
 		CloudName: "",
 		Token:     "",
+		isEmpty:   len(req.StartingToken) == 0,
 	}
 
 	cloudsNames := maps.Keys(cs.Clouds)
@@ -381,6 +407,16 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
 	var vlist []volumes.Volume
 	var nextPageToken string
 
+	if !cloudsToken.isEmpty && startingToken == "" {
+		// the previous call ended on the last volume of "currentCloudName": move on to the next cloud
+		for i := range cloudsNames {
+			if cloudsNames[i] == currentCloudName {
+				currentCloudName = cloudsNames[i+1]
+				break
+			}
+		}
+	}
+
 	startIdx := 0
 	for _, cloudName := range cloudsNames {
 		if cloudName == currentCloudName {
@@ -394,6 +430,7 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
 	} else {
 		vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(maxEntries, startingToken)
 	}
+	startingToken = nextPageToken
 	if err != nil {
 		klog.Errorf("Failed to ListVolumes: %v", err)
 		if cpoerrors.IsInvalidError(err) {
@@ -402,29 +439,13 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
 		return nil, status.Errorf(codes.Internal, "ListVolumes failed with error %v", err)
 	}
 
-	ventries := make([]*csi.ListVolumesResponse_Entry, 0, len(vlist))
-	for _, v := range vlist {
-		ventry := csi.ListVolumesResponse_Entry{
-			Volume: &csi.Volume{
-				VolumeId:      v.ID,
-				CapacityBytes: int64(v.Size * 1024 * 1024 * 1024),
-			},
-		}
-
-		status := &csi.ListVolumesResponse_VolumeStatus{}
-		status.PublishedNodeIds = make([]string, 0, len(v.Attachments))
-		for _, attachment := range v.Attachments {
-			status.PublishedNodeIds = append(status.PublishedNodeIds, attachment.ServerID)
-		}
-		ventry.Status = status
-
-		ventries = append(ventries, &ventry)
-	}
-	klog.V(4).Infof("ListVolumes: retreived %d entries and %q next token from cloud %q", len(ventries), nextPageToken, cloudsNames[idx])
+	ventries := cs.createVolumeEntries(vlist)
+	klog.V(4).Infof("ListVolumes: retrieved %d entries and %q next token from cloud %q", len(ventries), nextPageToken, cloudsNames[idx])
 
 	cloudsVentries = append(cloudsVentries, ventries...)
 
// Reach maxEntries setup nextToken with cloud identifier if needed + sendEmptyToken := false if maxEntries > 0 && len(cloudsVentries) == maxEntries { if nextPageToken == "" { if idx+1 == len(cloudsNames) { @@ -437,12 +458,35 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume }, nil } else { // still clouds to process - cloudsToken.CloudName = cloudsNames[idx+1] + // set token to next non empty cloud + i := 0 + for i = idx + 1; i < len(cloudsNames); i++ { + vlistTmp, _, err := cs.Clouds[cloudsNames[i]].ListVolumes(1, "") + if err != nil { + klog.Errorf("Failed to ListVolumes: %v", err) + if cpoerrors.IsInvalidError(err) { + return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: %v", err) + } + return nil, status.Errorf(codes.Internal, "ListVolumes failed with error %v", err) + } + if len(vlistTmp) > 0 { + cloudsToken.CloudName = cloudsNames[i] + cloudsToken.isEmpty = false + break + } + } + if i == len(cloudsNames) { + sendEmptyToken = true + } } } cloudsToken.CloudName = cloudsNames[idx] cloudsToken.Token = nextPageToken - data, _ := json.Marshal(cloudsToken) + var data []byte + data, _ = json.Marshal(cloudsToken) + if sendEmptyToken { + data = []byte("") + } klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(cloudsVentries), string(data)) return &csi.ListVolumesResponse{ Entries: cloudsVentries, diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index c3bdf6fd24..e65d8c661d 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -21,25 +21,32 @@ import ( "testing" "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack" + openstack "k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack" ) var fakeCs *controllerServer +var fakeCsMultipleClouds *controllerServer var osmock *openstack.OpenStackMock +var osmockRegionX *openstack.OpenStackMock // Init Controller Server func init() { if fakeCs == nil { osmock = new(openstack.OpenStackMock) - openstack.OsInstances = map[string]openstack.IOpenStack{ - "": osmock, - } + osmockRegionX = new(openstack.OpenStackMock) d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeCs = NewControllerServer(d, openstack.OsInstances) + fakeCs = NewControllerServer(d, map[string]openstack.IOpenStack{ + "": osmock, + }) + fakeCsMultipleClouds = NewControllerServer(d, map[string]openstack.IOpenStack{ + "": osmock, + "region-x": osmockRegionX, + }) } } @@ -409,6 +416,33 @@ func TestControllerUnpublishVolume(t *testing.T) { assert.Equal(expectedRes, actualRes) } +func extractFakeNodeIDs(attachments []volumes.Attachment) []string { + nodeIDs := make([]string, len(attachments)) + for i, attachment := range attachments { + nodeIDs[i] = attachment.ServerID + } + return nodeIDs +} + +func genFakeVolumeEntry(fakeVol volumes.Volume) *csi.ListVolumesResponse_Entry { + return &csi.ListVolumesResponse_Entry{ + Volume: &csi.Volume{ + VolumeId: fakeVol.ID, + CapacityBytes: int64(fakeVol.Size * 1024 * 1024 * 1024), + }, + Status: &csi.ListVolumesResponse_VolumeStatus{ + PublishedNodeIds: extractFakeNodeIDs(fakeVol.Attachments), + }, + } +} +func genFakeVolumeEntries(fakeVolumes []volumes.Volume) []*csi.ListVolumesResponse_Entry { + var entries []*csi.ListVolumesResponse_Entry + 
for _, fakeVol := range fakeVolumes { + entries = append(entries, genFakeVolumeEntry(fakeVol)) + } + return entries +} + func TestListVolumes(t *testing.T) { osmock.On("ListVolumes", 2, FakeVolID).Return(FakeVolListMultiple, "", nil) @@ -423,26 +457,7 @@ func TestListVolumes(t *testing.T) { // Expected Result expectedRes := &csi.ListVolumesResponse{ - Entries: []*csi.ListVolumesResponse_Entry{ - { - Volume: &csi.Volume{ - VolumeId: FakeVol1.ID, - CapacityBytes: int64(FakeVol1.Size * 1024 * 1024 * 1024), - }, - Status: &csi.ListVolumesResponse_VolumeStatus{ - PublishedNodeIds: []string{FakeNodeID}, - }, - }, - { - Volume: &csi.Volume{ - VolumeId: FakeVol3.ID, - CapacityBytes: int64(FakeVol3.Size * 1024 * 1024 * 1024), - }, - Status: &csi.ListVolumesResponse_VolumeStatus{ - PublishedNodeIds: []string{}, - }, - }, - }, + Entries: genFakeVolumeEntries(FakeVolListMultiple), NextToken: "", } @@ -456,6 +471,849 @@ func TestListVolumes(t *testing.T) { assert.Equal(expectedRes, actualRes) } +type ListVolumeTestOSMock struct { + //name string + mockCloud *openstack.OpenStackMock + mockMaxEntries int + mockVolumes []volumes.Volume + mockToken string + mockTokenCall string +} +type ListVolumesTest struct { + volumeSet map[string]ListVolumeTestOSMock + maxEntries int + StartingToken string + Result ListVolumesTestResult +} +type ListVolumesTestResult struct { + ExpectedToken CloudsStartingToken + Entries []*csi.ListVolumesResponse_Entry +} + +func TestGlobalListVolumesMultipleClouds(t *testing.T) { + //osmock.On("ListVolumes", 2, "").Return([]volumes.Volume{FakeVol1}, "", nil) + //osmockRegionX.On("ListVolumes", 1, "").Return([]volumes.Volume{FakeVol2}, FakeVol2.ID, nil) + + tests := []*ListVolumesTest{ + { + // no pagination, no clouds has volumes + maxEntries: 0, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 0, + mockVolumes: []volumes.Volume{}, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 0, + mockVolumes: []volumes.Volume{}, + mockToken: "", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + isEmpty: true, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{}), + }, + }, + { + // no pagination, all clouds has volumes + maxEntries: 0, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 0, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + {ID: "vol3"}, + {ID: "vol4"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 0, + mockVolumes: []volumes.Volume{ + {ID: "vol5"}, + {ID: "vol6"}, + {ID: "vol7"}, + }, + mockToken: "", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + isEmpty: true, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + {ID: "vol3"}, + {ID: "vol4"}, + {ID: "vol5"}, + {ID: "vol6"}, + {ID: "vol7"}, + }), + }, + }, + { + // no pagination, only first cloud have volumes + maxEntries: 0, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 0, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + {ID: "vol3"}, + {ID: "vol4"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 0, + mockVolumes: []volumes.Volume{}, + mockToken: "", + mockTokenCall: "", + }, 
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				isEmpty: true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol1"},
+				{ID: "vol2"},
+				{ID: "vol3"},
+				{ID: "vol4"},
+			}),
+		},
+	},
+	{
+		// no pagination, first cloud without volumes
+		maxEntries:    0,
+		StartingToken: "",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 0,
+				mockVolumes:    []volumes.Volume{},
+				mockToken:      "",
+				mockTokenCall:  "",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 0,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol1"},
+					{ID: "vol2"},
+					{ID: "vol3"},
+					{ID: "vol4"},
+				},
+				mockToken:     "",
+				mockTokenCall: "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				isEmpty: true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol1"},
+				{ID: "vol2"},
+				{ID: "vol3"},
+				{ID: "vol4"},
+			}),
+		},
+	},
+	// PAGINATION
+	{
+		// no volumes
+		maxEntries:    2,
+		StartingToken: "",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 2,
+				mockVolumes:    []volumes.Volume{},
+				mockToken:      "",
+				mockTokenCall:  "",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 2,
+				mockVolumes:    []volumes.Volume{},
+				mockToken:      "",
+				mockTokenCall:  "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				isEmpty: true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{}),
+		},
+	},
+	{
+		// cloud1: 1 volume, cloud2: 0 volume
+		maxEntries:    2,
+		StartingToken: "",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 2,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol1"},
+				},
+				mockToken:     "",
+				mockTokenCall: "",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 1,
+				mockVolumes:    []volumes.Volume{},
+				mockToken:      "",
+				mockTokenCall:  "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				isEmpty: true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol1"},
+			}),
+		},
+	},
+	{
+		// cloud1: 0 volume, cloud2: 1 volume
+		maxEntries:    2,
+		StartingToken: "",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 2,
+				mockVolumes:    []volumes.Volume{},
+				mockToken:      "",
+				mockTokenCall:  "",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 2,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol1"},
+				},
+				mockToken:     "",
+				mockTokenCall: "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				isEmpty: true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol1"},
+			}),
+		},
+	},
+	{
+		// cloud1: 2 volume, cloud2: 0 volume
+		maxEntries:    2,
+		StartingToken: "",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 2,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol1"},
+					{ID: "vol2"},
+				},
+				mockToken:     "",
+				mockTokenCall: "",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 1,
+				mockVolumes:    []volumes.Volume{},
+				mockToken:      "",
+				mockTokenCall:  "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				isEmpty: true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol1"},
+				{ID: "vol2"},
+			}),
+		},
+	},
+	{
+		// cloud1: 0 volume, cloud2: 2 volume
+		maxEntries:    2,
+		StartingToken: "",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 2,
+				mockVolumes:    []volumes.Volume{},
+				mockToken:      "",
+				mockTokenCall:  
"", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }, + mockToken: "", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + isEmpty: true, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }), + }, + }, + { + // cloud1: 2 volume, cloud2: 1 volume : 1st call + maxEntries: 2, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 1, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + }, + mockToken: "", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + CloudName: "", + Token: "", + isEmpty: false, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }), + }, + }, + { + // cloud1: 2 volume, cloud2: 1 volume : 2nd call + maxEntries: 2, + StartingToken: "{\"cloud\":\"\",\"token\":\"\"}", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 1234, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol3"}, + }, + mockToken: "", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + isEmpty: true, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol3"}, + }), + }, + }, + { + // cloud1: 1 volume, cloud2: 2 volume : 1st call + maxEntries: 2, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 1, + mockVolumes: []volumes.Volume{ + {ID: "vol2"}, + }, + mockToken: "vol2", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + CloudName: "region-x", + Token: "vol2", + isEmpty: false, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }), + }, + }, + { + // cloud1: 1 volume, cloud2: 2 volume : 2nd call + maxEntries: 2, + StartingToken: "{\"cloud\":\"region-x\",\"token\":\"vol2\"}", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 1234, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol3"}, + }, + mockToken: "", + mockTokenCall: "vol2", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + isEmpty: true, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol3"}, + }), + }, + }, + { + // cloud1: 2 volume, cloud2: 2 volume : 1st call + maxEntries: 2, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 1, + mockVolumes: []volumes.Volume{ + {ID: "vol3"}, + }, + mockToken: 
"vol3", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + CloudName: "", + Token: "", + isEmpty: false, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }), + }, + }, + { + // cloud1: 2 volume, cloud2: 2 volume : 2nd call + maxEntries: 2, + StartingToken: "{\"cloud\":\"\",\"token\":\"\"}", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 1234, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol3"}, + {ID: "vol4"}, + }, + mockToken: "", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + CloudName: "region-x", + Token: "", + isEmpty: true, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol3"}, + {ID: "vol4"}, + }), + }, + }, + { + // cloud1: 3 volume, cloud2: 2 volume : 1st call + maxEntries: 2, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }, + mockToken: "vol2", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 1234, + mockVolumes: []volumes.Volume{ + {ID: "vol3"}, + {ID: "vol4"}, + }, + mockToken: "", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + CloudName: "", + Token: "vol2", + isEmpty: false, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }), + }, + }, + { + // cloud1: 3 volume, cloud2: 2 volume : 2nd call + maxEntries: 2, + StartingToken: "{\"cloud\":\"\",\"token\":\"vol2\"}", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol3"}, + }, + mockToken: "", + mockTokenCall: "vol2", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 1, + mockVolumes: []volumes.Volume{ + {ID: "vol4"}, + }, + mockToken: "vol4", + mockTokenCall: "", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + CloudName: "region-x", + Token: "vol4", + isEmpty: false, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol3"}, + {ID: "vol4"}, + }), + }, + }, + { + // cloud1: 3 volume, cloud2: 2 volume : 3rd call + maxEntries: 2, + StartingToken: "{\"cloud\":\"region-x\",\"token\":\"vol4\"}", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 1234, + mockVolumes: []volumes.Volume{ + {ID: "vol3"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol5"}, + }, + mockToken: "", + mockTokenCall: "vol4", + }, + }, + Result: ListVolumesTestResult{ + ExpectedToken: CloudsStartingToken{ + CloudName: "region-x", + Token: "", + isEmpty: true, + }, + Entries: genFakeVolumeEntries([]volumes.Volume{ + {ID: "vol5"}, + }), + }, + }, + { + // cloud1: 2 volume, cloud2: 3 volume : 1st call + maxEntries: 2, + StartingToken: "", + volumeSet: map[string]ListVolumeTestOSMock{ + "": { + mockCloud: osmock, + mockMaxEntries: 2, + mockVolumes: []volumes.Volume{ + {ID: "vol1"}, + {ID: "vol2"}, + }, + mockToken: "", + mockTokenCall: "", + }, + "region-x": { + mockCloud: osmockRegionX, + mockMaxEntries: 1, + mockVolumes: 
[]volumes.Volume{
+					{ID: "vol3"},
+				},
+				mockToken:     "vol3",
+				mockTokenCall: "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				CloudName: "",
+				Token:     "",
+				isEmpty:   false,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol1"},
+				{ID: "vol2"},
+			}),
+		},
+	},
+	{
+		// cloud1: 2 volume, cloud2: 3 volume : 2nd call
+		maxEntries:    2,
+		StartingToken: "{\"cloud\":\"\",\"token\":\"\"}",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 1234,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol3"},
+				},
+				mockToken:     "",
+				mockTokenCall: "",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 2,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol3"},
+					{ID: "vol4"},
+				},
+				mockToken:     "vol4",
+				mockTokenCall: "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				CloudName: "region-x",
+				Token:     "vol4",
+				isEmpty:   false,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol3"},
+				{ID: "vol4"},
+			}),
+		},
+	},
+	{
+		// cloud1: 2 volume, cloud2: 3 volume : 3rd call
+		maxEntries:    2,
+		StartingToken: "{\"cloud\":\"region-x\",\"token\":\"vol4\"}",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 1234,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol3"},
+				},
+				mockToken:     "",
+				mockTokenCall: "",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 2,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol5"},
+				},
+				mockToken:     "",
+				mockTokenCall: "vol4",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				CloudName: "region-x",
+				Token:     "",
+				isEmpty:   true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol5"},
+			}),
+		},
+	},
+	{
+		// cloud1: 3 volume, cloud2: 1 volume : 2nd call
+		maxEntries:    2,
+		StartingToken: "{\"cloud\":\"\",\"token\":\"vol2\"}",
+		volumeSet: map[string]ListVolumeTestOSMock{
+			"": {
+				mockCloud:      osmock,
+				mockMaxEntries: 2,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol3"},
+				},
+				mockToken:     "",
+				mockTokenCall: "vol2",
+			},
+			"region-x": {
+				mockCloud:      osmockRegionX,
+				mockMaxEntries: 1,
+				mockVolumes: []volumes.Volume{
+					{ID: "vol4"},
+				},
+				mockToken:     "",
+				mockTokenCall: "",
+			},
+		},
+		Result: ListVolumesTestResult{
+			ExpectedToken: CloudsStartingToken{
+				isEmpty: true,
+			},
+			Entries: genFakeVolumeEntries([]volumes.Volume{
+				{ID: "vol3"},
+				{ID: "vol4"},
+			}),
+		},
+	},
+	}
+
+	// Init assert
+	assert := assert.New(t)
+	for _, test := range tests {
+		// Setup Mock
+		for _, volumeSet := range test.volumeSet {
+			cloud := volumeSet.mockCloud
+			cloud.On(
+				"ListVolumes",
+				volumeSet.mockMaxEntries,
+				volumeSet.mockTokenCall,
+			).Return(
+				volumeSet.mockVolumes,
+				volumeSet.mockToken,
+				nil,
+			).Once()
+		}
+		fakeReq := &csi.ListVolumesRequest{MaxEntries: int32(test.maxEntries), StartingToken: test.StartingToken}
+		expectedToken, _ := json.Marshal(test.Result.ExpectedToken)
+		if test.Result.ExpectedToken.isEmpty {
+			expectedToken = []byte("")
+		}
+		expectedRes := &csi.ListVolumesResponse{
+			Entries:   test.Result.Entries,
+			NextToken: string(expectedToken),
+		}
+		// Invoke ListVolumes
+		actualRes, err := fakeCsMultipleClouds.ListVolumes(FakeCtx, fakeReq)
+		if err != nil {
+			t.Errorf("failed to ListVolumes: %v", err)
+		}
+		// Assert
+		assert.Equal(expectedRes, actualRes)
+
+		// Unset Mock
+		for _, volumeSet := range test.volumeSet {
+			cloud := volumeSet.mockCloud
+			cloud.On(
+				"ListVolumes",
+				volumeSet.mockMaxEntries,
+				volumeSet.mockTokenCall,
+			).Return(
+				
volumeSet.mockVolumes, + volumeSet.mockToken, + nil, + ).Unset() + } + } +} + // Test CreateSnapshot func TestCreateSnapshot(t *testing.T) {