diff --git a/cmd/runhcs/create-scratch.go b/cmd/runhcs/create-scratch.go index 78bb614171..2572186856 100644 --- a/cmd/runhcs/create-scratch.go +++ b/cmd/runhcs/create-scratch.go @@ -52,6 +52,9 @@ var createScratchCommand = cli.Command{ // 256MB with boot from vhd supported. opts.MemorySizeInMB = 256 opts.VPMemDeviceCount = 1 + // Default SCSI controller count is 4; we don't need that for this UVM, + // so bring it back to 1 to avoid any confusion with SCSI controller numbers. + opts.SCSIControllerCount = 1 sizeGB := uint32(context.Uint("sizeGB")) if sizeGB == 0 { diff --git a/cmd/runhcs/prepare-disk.go b/cmd/runhcs/prepare-disk.go index 6349e02911..6d0a437af6 100644 --- a/cmd/runhcs/prepare-disk.go +++ b/cmd/runhcs/prepare-disk.go @@ -44,6 +44,9 @@ var prepareDiskCommand = cli.Command{ } opts := uvm.NewDefaultOptionsLCOW("preparedisk-uvm", context.GlobalString("owner")) + // Default SCSI controller count is 4; we don't need that for this UVM, + // so bring it back to 1 to avoid any confusion with SCSI controller numbers. + opts.SCSIControllerCount = 1 preparediskUVM, err := uvm.CreateLCOW(ctx, opts) if err != nil { diff --git a/internal/guest/storage/scsi/scsi.go b/internal/guest/storage/scsi/scsi.go index f5231dfb9d..7af2db325d 100644 --- a/internal/guest/storage/scsi/scsi.go +++ b/internal/guest/storage/scsi/scsi.go @@ -8,7 +8,10 @@ import ( "fmt" "io/ioutil" "os" + "path" "path/filepath" + "strconv" + "strings" "time" "github.com/pkg/errors" @@ -20,6 +23,7 @@ import ( dm "github.com/Microsoft/hcsshim/internal/guest/storage/devicemapper" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" "github.com/Microsoft/hcsshim/pkg/securitypolicy" ) @@ -39,11 +43,45 @@ var ( ) const ( - scsiDevicesPath = "/sys/bus/scsi/devices" - verityDeviceFmt = "verity-scsi-contr%d-lun%d-%s" + scsiDevicesPath = "/sys/bus/scsi/devices" + vmbusDevicesPath = "/sys/bus/vmbus/devices" + verityDeviceFmt = "verity-scsi-contr%d-lun%d-%s" ) -// Mount creates a mount from the SCSI device on `controller` index `lun` to +// fetchActualControllerNumber retrieves the actual controller number assigned to a SCSI controller +// with number `passedController`. +// When HCS creates the UVM it adds 4 SCSI controllers to the UVM, but the 1st SCSI +// controller according to HCS can actually show up as the 2nd, 3rd or 4th controller inside +// the UVM. So the i-th controller from HCS's perspective could actually be the j-th controller +// inside the UVM. However, we can refer to the SCSI controllers by their GUIDs (which +// are hardcoded) and then, using that GUID, find out the SCSI controller number inside the +// guest. This function does exactly that. +func fetchActualControllerNumber(ctx context.Context, passedController uint8) (uint8, error) { + // find the controller number by looking for a file named host<N> (e.g. host1, host3 etc.) + // where `N` is the controller number. + // Full file path would be /sys/bus/vmbus/devices/<controller-GUID>/host<N>.
+ controllerDirPath := path.Join(vmbusDevicesPath, guestrequest.ScsiControllerGuids[passedController]) + entries, err := ioutil.ReadDir(controllerDirPath) + if err != nil { + return 0, err + } + + for _, entry := range entries { + baseName := path.Base(entry.Name()) + if !strings.HasPrefix(baseName, "host") { + continue + } + controllerStr := baseName[len("host"):] + controllerNum, err := strconv.ParseUint(controllerStr, 10, 8) + if err != nil { + return 0, fmt.Errorf("failed to parse controller number from %s: %w", baseName, err) + } + return uint8(controllerNum), nil + } + return 0, fmt.Errorf("host directory not found inside %s", controllerDirPath) +} + +// mount creates a mount from the SCSI device on `controller` index `lun` to // `target` // // `target` will be created. On mount failure the created `target` will be @@ -51,7 +89,7 @@ const ( // // If `encrypted` is set to true, the SCSI device will be encrypted using // dm-crypt. -func Mount( +func mount( ctx context.Context, controller, lun uint8, @@ -159,10 +197,30 @@ func Mount( return nil } -// Unmount unmounts a SCSI device mounted at `target`. +// Mount is just a wrapper over actual mount call. This wrapper finds out the controller +// number from the controller GUID string and calls mount. +func Mount( + ctx context.Context, + controller, + lun uint8, + target string, + readonly bool, + encrypted bool, + options []string, + verityInfo *guestresource.DeviceVerityInfo, + securityPolicy securitypolicy.SecurityPolicyEnforcer, +) (err error) { + cNum, err := fetchActualControllerNumber(ctx, controller) + if err != nil { + return err + } + return mount(ctx, cNum, lun, target, readonly, encrypted, options, verityInfo, securityPolicy) +} + +// unmount unmounts a SCSI device mounted at `target`. // // If `encrypted` is true, it removes all its associated dm-crypto state. -func Unmount( +func unmount( ctx context.Context, controller, lun uint8, @@ -206,6 +264,24 @@ func Unmount( return nil } +// Unmount is just a wrapper over actual unmount call. This wrapper finds out the controller +// number from the controller GUID string and calls unmount. +func Unmount( + ctx context.Context, + controller, + lun uint8, + target string, + encrypted bool, + verityInfo *guestresource.DeviceVerityInfo, + securityPolicy securitypolicy.SecurityPolicyEnforcer, +) (err error) { + cNum, err := fetchActualControllerNumber(ctx, controller) + if err != nil { + return err + } + return unmount(ctx, cNum, lun, target, encrypted, verityInfo, securityPolicy) +} + // ControllerLunToName finds the `/dev/sd*` path to the SCSI device on // `controller` index `lun`. func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, err error) { @@ -217,8 +293,7 @@ func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, trace.Int64Attribute("controller", int64(controller)), trace.Int64Attribute("lun", int64(lun))) - scsiID := fmt.Sprintf("0:0:%d:%d", controller, lun) - + scsiID := fmt.Sprintf("%d:0:0:%d", controller, lun) // Devices matching the given SCSI code should each have a subdirectory // under /sys/bus/scsi/devices/<scsiID>/block. blockPath := filepath.Join(scsiDevicesPath, scsiID, "block") @@ -249,11 +324,11 @@ func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, return devicePath, nil } -// UnplugDevice finds the SCSI device on `controller` index `lun` and issues a +// unplugDevice finds the SCSI device on `controller` index `lun` and issues a // guest initiated unplug.
// // If the device is not attached returns no error. -func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { +func unplugDevice(ctx context.Context, controller, lun uint8) (err error) { _, span := trace.StartSpan(ctx, "scsi::UnplugDevice") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() @@ -262,7 +337,7 @@ func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { trace.Int64Attribute("controller", int64(controller)), trace.Int64Attribute("lun", int64(lun))) - scsiID := fmt.Sprintf("0:0:%d:%d", controller, lun) + scsiID := fmt.Sprintf("%d:0:0:%d", controller, lun) f, err := os.OpenFile(filepath.Join(scsiDevicesPath, scsiID, "delete"), os.O_WRONLY, 0644) if err != nil { if os.IsNotExist(err) { @@ -277,3 +352,13 @@ func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { } return nil } + +// UnplugDevice is just a wrapper over actual unplugDevice call. This wrapper finds out the controller +// number from the controller GUID string and calls unplugDevice. +func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { + cNum, err := fetchActualControllerNumber(ctx, controller) + if err != nil { + return err + } + return unplugDevice(ctx, cNum, lun) +} diff --git a/internal/guest/storage/scsi/scsi_test.go b/internal/guest/storage/scsi/scsi_test.go index 4ac33ebcb4..c4cb2f55ae 100644 --- a/internal/guest/storage/scsi/scsi_test.go +++ b/internal/guest/storage/scsi/scsi_test.go @@ -37,7 +37,7 @@ func Test_Mount_Mkdir_Fails_Error(t *testing.T) { return "", nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -74,7 +74,7 @@ func Test_Mount_Mkdir_ExpectedPath(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -111,7 +111,7 @@ func Test_Mount_Mkdir_ExpectedPerm(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -148,7 +148,7 @@ func Test_Mount_ControllerLunToName_Valid_Controller(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), expectedController, 0, @@ -185,7 +185,7 @@ func Test_Mount_ControllerLunToName_Valid_Lun(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, expectedLun, @@ -225,7 +225,7 @@ func Test_Mount_Calls_RemoveAll_OnMountFailure(t *testing.T) { return expectedErr } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -263,7 +263,7 @@ func Test_Mount_Valid_Source(t *testing.T) { } return nil } - err := Mount(context.Background(), 0, 0, "/fake/path", false, false, nil, nil, openDoorSecurityPolicyEnforcer()) + err := mount(context.Background(), 0, 0, "/fake/path", false, false, nil, nil, openDoorSecurityPolicyEnforcer()) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -290,7 +290,7 @@ func Test_Mount_Valid_Target(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -326,7 +326,7 @@ func Test_Mount_Valid_FSType(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -362,7 +362,7 @@ func Test_Mount_Valid_Flags(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -398,7 +398,7 @@ func Test_Mount_Readonly_Valid_Flags(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -433,7 +433,7 @@ func Test_Mount_Valid_Data(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, 
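The diff above does not add a direct test for fetchActualControllerNumber. Below is a rough sketch of how its host<N> lookup could be unit-tested against a fake vmbus tree. It assumes the lookup were factored into a helper that takes the devices root as a parameter (hypothetical; the real function reads the vmbusDevicesPath constant directly, so this is illustration only, not part of the change):

```go
package scsi

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"testing"
)

// resolveHostNumber mirrors the lookup in fetchActualControllerNumber, but takes
// the vmbus devices root as a parameter so it can run against a fake sysfs tree.
func resolveHostNumber(devicesRoot, controllerGUID string) (uint8, error) {
	controllerDirPath := path.Join(devicesRoot, controllerGUID)
	entries, err := ioutil.ReadDir(controllerDirPath)
	if err != nil {
		return 0, err
	}
	for _, entry := range entries {
		baseName := path.Base(entry.Name())
		if !strings.HasPrefix(baseName, "host") {
			continue
		}
		controllerNum, err := strconv.ParseUint(baseName[len("host"):], 10, 8)
		if err != nil {
			return 0, fmt.Errorf("failed to parse controller number from %s: %w", baseName, err)
		}
		return uint8(controllerNum), nil
	}
	return 0, fmt.Errorf("host directory not found inside %s", controllerDirPath)
}

func Test_ResolveHostNumber_FakeVmbusTree(t *testing.T) {
	root := t.TempDir()
	const g = "df6d0690-79e5-55b6-a5ec-c1e2f77f580a" // ScsiControllerGuids[0]
	// Simulate /sys/bus/vmbus/devices/<controller-GUID>/host3.
	if err := os.MkdirAll(path.Join(root, g, "host3"), 0755); err != nil {
		t.Fatal(err)
	}
	n, err := resolveHostNumber(root, g)
	if err != nil {
		t.Fatal(err)
	}
	if n != 3 {
		t.Fatalf("expected controller number 3, got %d", n)
	}
}
```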
@@ -469,7 +469,7 @@ func Test_Mount_Readonly_Valid_Data(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -506,7 +506,7 @@ func Test_Read_Only_Security_Policy_Enforcement_Mount_Calls(t *testing.T) { } enforcer := mountMonitoringSecurityPolicyEnforcer() - err := Mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) + err := mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -549,7 +549,7 @@ func Test_Read_Write_Security_Policy_Enforcement_Mount_Calls(t *testing.T) { } enforcer := mountMonitoringSecurityPolicyEnforcer() - err := Mount(context.Background(), 0, 0, target, false, false, nil, nil, enforcer) + err := mount(context.Background(), 0, 0, target, false, false, nil, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -592,12 +592,12 @@ func Test_Security_Policy_Enforcement_Unmount_Calls(t *testing.T) { } enforcer := mountMonitoringSecurityPolicyEnforcer() - err := Mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) + err := mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: %v", err) } - err = Unmount(context.Background(), 0, 0, target, false, nil, enforcer) + err = unmount(context.Background(), 0, 0, target, false, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -668,7 +668,7 @@ func Test_CreateVerityTarget_And_Mount_Called_With_Correct_Parameters(t *testing return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -717,7 +717,7 @@ func Test_osMkdirAllFails_And_RemoveDevice_Called(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, diff --git a/internal/protocol/guestrequest/types.go b/internal/protocol/guestrequest/types.go index 5c3d7111d4..d8d0c20b10 100644 --- a/internal/protocol/guestrequest/types.go +++ b/internal/protocol/guestrequest/types.go @@ -41,3 +41,16 @@ type RS4NetworkModifyRequest struct { RequestType RequestType `json:"RequestType,omitempty"` Settings interface{} `json:"Settings,omitempty"` } + +var ( + // V5 GUIDs for SCSI controllers + // These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4" + // and index as names. 
For example, first GUID is created like this: + // guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0")) + ScsiControllerGuids = []string{ + "df6d0690-79e5-55b6-a5ec-c1e2f77f580a", + "0110f83b-de10-5172-a266-78bca56bf50a", + "b5d2d8d4-3a75-51bf-945b-3444dc6b8579", + "305891a9-b251-5dfe-91a2-c25d9212275b", + } +) diff --git a/internal/protocol/guestrequest/types_test.go b/internal/protocol/guestrequest/types_test.go new file mode 100644 index 0000000000..52b5807c48 --- /dev/null +++ b/internal/protocol/guestrequest/types_test.go @@ -0,0 +1,15 @@ +package guestrequest + +import ( + "github.com/Microsoft/go-winio/pkg/guid" + "testing" +) + +func TestGuidValidity(t *testing.T) { + for _, g := range ScsiControllerGuids { + _, err := guid.FromString(g) + if err != nil { + t.Fatalf("GUID parsing failed: %s", err) + } + } +} diff --git a/internal/uvm/constants.go b/internal/uvm/constants.go index 842a8a5e8c..9be9fc4043 100644 --- a/internal/uvm/constants.go +++ b/internal/uvm/constants.go @@ -2,6 +2,8 @@ package uvm import ( "errors" + + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" ) const ( @@ -34,4 +36,7 @@ const ( var ( errNotSupported = errors.New("not supported") errBadUVMOpts = errors.New("UVM options incorrect") + + // Maximum number of SCSI controllers allowed + MaxSCSIControllers = uint32(len(guestrequest.ScsiControllerGuids)) ) diff --git a/internal/uvm/create.go b/internal/uvm/create.go index a3d9fcd4e6..1a08bae535 100644 --- a/internal/uvm/create.go +++ b/internal/uvm/create.go @@ -94,6 +94,9 @@ type Options struct { // NoWritableFileShares disables adding any writable vSMB and Plan9 shares to the UVM NoWritableFileShares bool + + // The number of SCSI controllers. Defaults to 1 for WCOW and 4 for LCOW + SCSIControllerCount uint32 } // compares the create opts used during template creation with the create opts @@ -131,8 +134,8 @@ func verifyOptions(ctx context.Context, options interface{}) error { if opts.EnableDeferredCommit && !opts.AllowOvercommit { return errors.New("EnableDeferredCommit is not supported on physically backed VMs") } - if opts.SCSIControllerCount > 1 { - return errors.New("SCSI controller count must be 0 or 1") // Future extension here for up to 4 + if opts.SCSIControllerCount > MaxSCSIControllers { + return fmt.Errorf("SCSI controller count can't be more than %d", MaxSCSIControllers) } if opts.VPMemDeviceCount > MaxVPMEMCount { return fmt.Errorf("VPMem device count cannot be greater than %d", MaxVPMEMCount) @@ -141,10 +144,6 @@ func verifyOptions(ctx context.Context, options interface{}) error { if opts.VPMemSizeBytes%4096 != 0 { return errors.New("VPMemSizeBytes must be a multiple of 4096") } - } else { - if opts.PreferredRootFSType == PreferredRootFSTypeVHD { - return errors.New("PreferredRootFSTypeVHD requires at least one VPMem device") - } } if opts.KernelDirect && osversion.Build() < 18286 { return errors.New("KernelDirectBoot is not supported on builds older than 18286") @@ -160,6 +159,9 @@ func verifyOptions(ctx context.Context, options interface{}) error { if len(opts.LayerFolders) < 2 { return errors.New("at least 2 LayerFolders must be supplied") } + if opts.SCSIControllerCount != 1 { + return errors.New("exactly 1 SCSI controller is required for WCOW") + } if opts.IsClone && !verifyCloneUvmCreateOpts(&opts.TemplateConfig.CreateOpts, opts) { return errors.New("clone configuration doesn't match with template configuration") } @@ -188,6 +190,7 @@ func newDefaultOptions(id, owner string) *Options { ProcessorCount: 
defaultProcessorCount(), FullyPhysicallyBacked: false, NoWritableFileShares: false, + SCSIControllerCount: 1, } if opts.Owner == "" { diff --git a/internal/uvm/create_lcow.go b/internal/uvm/create_lcow.go index 1211f78ceb..ee42371826 100644 --- a/internal/uvm/create_lcow.go +++ b/internal/uvm/create_lcow.go @@ -23,6 +23,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/osversion" ) @@ -86,7 +87,6 @@ type OptionsLCOW struct { KernelBootOptions string // Additional boot options for the kernel EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe - SCSIControllerCount uint32 // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1. UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true ExecCommandLine string // The command line to exec from init. Defaults to GCS ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false @@ -137,7 +137,6 @@ func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { KernelBootOptions: "", EnableGraphicsConsole: false, ConsolePipe: "", - SCSIControllerCount: 1, UseGuestConnection: true, ExecCommandLine: fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()), ForwardStdout: false, @@ -352,11 +351,11 @@ func makeLCOWVMGSDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ } if uvm.scsiControllerCount > 0 { - // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ Attachments: make(map[string]hcsschema.Attachment), - }, + } } } @@ -537,13 +536,14 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs } if uvm.scsiControllerCount > 0 { - // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ Attachments: make(map[string]hcsschema.Attachment), - }, + } } } + if uvm.vpmemMaxCount > 0 { doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{ MaximumCount: uvm.vpmemMaxCount, @@ -558,48 +558,59 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs kernelArgs = "initrd=/" + opts.RootFSFile } case PreferredRootFSTypeVHD: - // Support for VPMem VHD(X) booting rather than initrd.. 
- kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" - imageFormat := "Vhd1" - if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { - imageFormat = "Vhdx" - } - doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ - "0": { - HostPath: rootfsFullPath, - ReadOnly: true, - ImageFormat: imageFormat, - }, - } - if uvm.vpmemMultiMapping { - pmem := newPackedVPMemDevice() - pmem.maxMappedDeviceCount = 1 - - st, err := os.Stat(rootfsFullPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) + if uvm.vpmemMaxCount > 0 { + // Support for VPMem VHD(X) booting rather than initrd.. + kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" + imageFormat := "Vhd1" + if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { + imageFormat = "Vhdx" } - devSize := pageAlign(uint64(st.Size())) - memReg, err := pmem.Allocate(devSize) - if err != nil { - return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ + "0": { + HostPath: rootfsFullPath, + ReadOnly: true, + ImageFormat: imageFormat, + }, } - defer func() { + if uvm.vpmemMultiMapping { + pmem := newPackedVPMemDevice() + pmem.maxMappedDeviceCount = 1 + + st, err := os.Stat(rootfsFullPath) if err != nil { - if err = pmem.Release(memReg); err != nil { - log.G(ctx).WithError(err).Debug("failed to release memory region") - } + return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) } - }() + devSize := pageAlign(uint64(st.Size())) + memReg, err := pmem.Allocate(devSize) + if err != nil { + return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + } + defer func() { + if err != nil { + if err = pmem.Release(memReg); err != nil { + log.G(ctx).WithError(err).Debug("failed to release memory region") + } + } + }() - dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) - if err := pmem.mapVHDLayer(ctx, dev); err != nil { - return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) + if err := pmem.mapVHDLayer(ctx, dev); err != nil { + return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + } + uvm.vpmemDevicesMultiMapped[0] = pmem + } else { + dev := newDefaultVPMemInfo(opts.RootFSFile, "/") + uvm.vpmemDevicesDefault[0] = dev } - uvm.vpmemDevicesMultiMapped[0] = pmem } else { - dev := newDefaultVPMemInfo(opts.RootFSFile, "/") - uvm.vpmemDevicesDefault[0] = dev + kernelArgs = "root=/dev/sda ro rootwait init=/init" + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ + Type_: "VirtualDisk", + Path: rootfsFullPath, + ReadOnly: true, + } + uvm.scsiLocations[0][0] = newSCSIMount(uvm, rootfsFullPath, "/", "VirtualDisk", "", 1, 0, 0, true, false) + } } @@ -737,6 +748,12 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error } }() + // vpmemMaxCount has been set to 0 which means we are going to need multiple SCSI controllers + // to support lots of layers. 
+ if osversion.Build() >= osversion.RS5 && uvm.vpmemMaxCount == 0 { + uvm.scsiControllerCount = 4 + } + if err = verifyOptions(ctx, opts); err != nil { return nil, errors.Wrap(err, errBadUVMOpts.Error()) } diff --git a/internal/uvm/create_wcow.go b/internal/uvm/create_wcow.go index 1d083ffda2..4a92fc962d 100644 --- a/internal/uvm/create_wcow.go +++ b/internal/uvm/create_wcow.go @@ -17,6 +17,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/uvmfolder" "github.com/Microsoft/hcsshim/internal/wclayer" @@ -249,7 +250,7 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error id: opts.ID, owner: opts.Owner, operatingSystem: "windows", - scsiControllerCount: 1, + scsiControllerCount: opts.SCSIControllerCount, vsmbDirShares: make(map[string]*VSMBShare), vsmbFileShares: make(map[string]*VSMBShare), vpciDevices: make(map[VPCIDeviceKey]*VPCIDevice), @@ -310,21 +311,23 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error } } - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { - Attachments: map[string]hcsschema.Attachment{ - "0": { - Path: scratchPath, - Type_: "VirtualDisk", - }, - }, - }, + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ + Attachments: make(map[string]hcsschema.Attachment), + } + } + + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ + + Path: scratchPath, + Type_: "VirtualDisk", } uvm.scsiLocations[0][0] = newSCSIMount(uvm, - doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path, + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Path, "", - doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Type_, + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Type_, "", 1, 0, diff --git a/internal/uvm/scsi.go b/internal/uvm/scsi.go index 5cac727387..78d7516ffa 100644 --- a/internal/uvm/scsi.go +++ b/internal/uvm/scsi.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strconv" "strings" "github.com/Microsoft/go-winio/pkg/security" @@ -169,8 +168,8 @@ func newSCSIMount( // SCSI controllers associated with a utility VM to use. 
// Lock must be held when calling this function func (uvm *UtilityVM) allocateSCSISlot(ctx context.Context) (int, int, error) { - for controller, luns := range uvm.scsiLocations { - for lun, sm := range luns { + for controller := 0; controller < int(uvm.scsiControllerCount); controller++ { + for lun, sm := range uvm.scsiLocations[controller] { // If sm is nil, we have found an open slot so we allocate a new SCSIMount if sm == nil { return controller, lun, nil @@ -224,7 +223,7 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { scsiModification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), } var verity *guestresource.DeviceVerityInfo @@ -408,11 +407,6 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) return nil, ErrNoSCSIControllers } - // Note: Can remove this check post-RS5 if multiple controllers are supported - if sm.Controller > 0 { - return nil, ErrTooManyAttachments - } - SCSIModification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeAdd, Settings: hcsschema.Attachment{ @@ -421,7 +415,7 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) ReadOnly: addReq.readOnly, ExtensibleVirtualDiskType: addReq.evdType, }, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), } if sm.UVMPath != "" { @@ -637,7 +631,7 @@ func (sm *SCSIMount) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) er dstVhdPath string = sm.HostPath err error dir string - conStr string = fmt.Sprintf("%d", sm.Controller) + conStr string = guestrequest.ScsiControllerGuids[sm.Controller] lunStr string = fmt.Sprintf("%d", sm.LUN) ) diff --git a/test/cri-containerd/disable_vpmem_test.go b/test/cri-containerd/disable_vpmem_test.go new file mode 100644 index 0000000000..d40e5f3148 --- /dev/null +++ b/test/cri-containerd/disable_vpmem_test.go @@ -0,0 +1,106 @@ +//go:build functional +// +build functional + +package cri_containerd + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "sync" + "testing" + "time" + + "github.com/Microsoft/hcsshim/pkg/annotations" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" +) + +// Use unique names for pods & containers so that if we run this test multiple times in parallel we don't +// get failures due to same pod/container names. 
+func uniqueRef() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) +} + +func Test_70LayerImagesWithNoVPmemForLayers(t *testing.T) { + requireFeatures(t, featureLCOW) + + ubuntu70Image := "cplatpublic.azurecr.io/ubuntu70extra:18.04" + alpine70Image := "cplatpublic.azurecr.io/alpine70extra:latest" + testImages := []string{ubuntu70Image, alpine70Image} + pullRequiredLCOWImages(t, []string{imageLcowK8sPause, ubuntu70Image, alpine70Image}) + + client := newTestRuntimeClient(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nContainers := 4 + podID := "" + containerIDs := make([]string, nContainers) + + defer cleanupPod(t, client, ctx, &podID) + for i := 0; i < nContainers; i++ { + defer cleanupContainer(t, client, ctx, &containerIDs[i]) + } + + sandboxRequest := getRunPodSandboxRequest( + t, + lcowRuntimeHandler, + WithSandboxAnnotations(map[string]string{ + annotations.VPMemCount: "0", + }), + ) + // override pod name + sandboxRequest.Config.Metadata.Name = fmt.Sprintf("%s-pod-%s", t.Name(), uniqueRef()) + + response, err := client.RunPodSandbox(ctx, sandboxRequest) + if err != nil { + t.Fatalf("failed RunPodSandbox request with: %v", err) + } + podID = response.PodSandboxId + + var wg sync.WaitGroup + wg.Add(nContainers) + for idx := 0; idx < nContainers; idx++ { + go func(i int) { + defer wg.Done() + request := &runtime.CreateContainerRequest{ + PodSandboxId: podID, + Config: &runtime.ContainerConfig{ + Metadata: &runtime.ContainerMetadata{ + Name: fmt.Sprintf("%s-container", uniqueRef()), + }, + Image: &runtime.ImageSpec{ + Image: testImages[i%2], + }, + Command: []string{ + "/bin/sh", + "-c", + "while true; do echo 'Hello, World!'; sleep 1; done", + }, + }, + SandboxConfig: sandboxRequest.Config, + } + + containerIDs[i] = createContainer(t, client, ctx, request) + startContainer(t, client, ctx, containerIDs[i]) + }(idx) + } + wg.Wait() + + for i := 0; i < nContainers; i++ { + containerExecReq := &runtime.ExecSyncRequest{ + ContainerId: containerIDs[i], + Cmd: []string{"ls"}, + Timeout: 20, + } + r := execSync(t, client, ctx, containerExecReq) + if r.ExitCode != 0 { + t.Fatalf("failed to exec inside container, exit code: %d", r.ExitCode) + } + } +} diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go b/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go index 5c3d7111d4..d8d0c20b10 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go @@ -41,3 +41,16 @@ type RS4NetworkModifyRequest struct { RequestType RequestType `json:"RequestType,omitempty"` Settings interface{} `json:"Settings,omitempty"` } + +var ( + // V5 GUIDs for SCSI controllers + // These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4" + // and index as names. 
For example, first GUID is created like this: + // guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0")) + ScsiControllerGuids = []string{ + "df6d0690-79e5-55b6-a5ec-c1e2f77f580a", + "0110f83b-de10-5172-a266-78bca56bf50a", + "b5d2d8d4-3a75-51bf-945b-3444dc6b8579", + "305891a9-b251-5dfe-91a2-c25d9212275b", + } +) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go index 842a8a5e8c..9be9fc4043 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go @@ -2,6 +2,8 @@ package uvm import ( "errors" + + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" ) const ( @@ -34,4 +36,7 @@ const ( var ( errNotSupported = errors.New("not supported") errBadUVMOpts = errors.New("UVM options incorrect") + + // Maximum number of SCSI controllers allowed + MaxSCSIControllers = uint32(len(guestrequest.ScsiControllerGuids)) ) diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go index a3d9fcd4e6..1a08bae535 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go @@ -94,6 +94,9 @@ type Options struct { // NoWritableFileShares disables adding any writable vSMB and Plan9 shares to the UVM NoWritableFileShares bool + + // The number of SCSI controllers. Defaults to 1 for WCOW and 4 for LCOW + SCSIControllerCount uint32 } // compares the create opts used during template creation with the create opts @@ -131,8 +134,8 @@ func verifyOptions(ctx context.Context, options interface{}) error { if opts.EnableDeferredCommit && !opts.AllowOvercommit { return errors.New("EnableDeferredCommit is not supported on physically backed VMs") } - if opts.SCSIControllerCount > 1 { - return errors.New("SCSI controller count must be 0 or 1") // Future extension here for up to 4 + if opts.SCSIControllerCount > MaxSCSIControllers { + return fmt.Errorf("SCSI controller count can't be more than %d", MaxSCSIControllers) } if opts.VPMemDeviceCount > MaxVPMEMCount { return fmt.Errorf("VPMem device count cannot be greater than %d", MaxVPMEMCount) @@ -141,10 +144,6 @@ func verifyOptions(ctx context.Context, options interface{}) error { if opts.VPMemSizeBytes%4096 != 0 { return errors.New("VPMemSizeBytes must be a multiple of 4096") } - } else { - if opts.PreferredRootFSType == PreferredRootFSTypeVHD { - return errors.New("PreferredRootFSTypeVHD requires at least one VPMem device") - } } if opts.KernelDirect && osversion.Build() < 18286 { return errors.New("KernelDirectBoot is not supported on builds older than 18286") @@ -160,6 +159,9 @@ func verifyOptions(ctx context.Context, options interface{}) error { if len(opts.LayerFolders) < 2 { return errors.New("at least 2 LayerFolders must be supplied") } + if opts.SCSIControllerCount != 1 { + return errors.New("exactly 1 SCSI controller is required for WCOW") + } if opts.IsClone && !verifyCloneUvmCreateOpts(&opts.TemplateConfig.CreateOpts, opts) { return errors.New("clone configuration doesn't match with template configuration") } @@ -188,6 +190,7 @@ func newDefaultOptions(id, owner string) *Options { ProcessorCount: defaultProcessorCount(), FullyPhysicallyBacked: false, NoWritableFileShares: false, + SCSIControllerCount: 1, } if opts.Owner == "" { diff --git 
a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go index 1211f78ceb..ee42371826 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go @@ -23,6 +23,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/osversion" ) @@ -86,7 +87,6 @@ type OptionsLCOW struct { KernelBootOptions string // Additional boot options for the kernel EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe - SCSIControllerCount uint32 // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1. UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true ExecCommandLine string // The command line to exec from init. Defaults to GCS ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false @@ -137,7 +137,6 @@ func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { KernelBootOptions: "", EnableGraphicsConsole: false, ConsolePipe: "", - SCSIControllerCount: 1, UseGuestConnection: true, ExecCommandLine: fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()), ForwardStdout: false, @@ -352,11 +351,11 @@ func makeLCOWVMGSDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ } if uvm.scsiControllerCount > 0 { - // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ Attachments: make(map[string]hcsschema.Attachment), - }, + } } } @@ -537,13 +536,14 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs } if uvm.scsiControllerCount > 0 { - // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ Attachments: make(map[string]hcsschema.Attachment), - }, + } } } + if uvm.vpmemMaxCount > 0 { doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{ MaximumCount: uvm.vpmemMaxCount, @@ -558,48 +558,59 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs kernelArgs = "initrd=/" + opts.RootFSFile } case PreferredRootFSTypeVHD: - // Support for VPMem VHD(X) booting rather than initrd.. 
- kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" - imageFormat := "Vhd1" - if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { - imageFormat = "Vhdx" - } - doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ - "0": { - HostPath: rootfsFullPath, - ReadOnly: true, - ImageFormat: imageFormat, - }, - } - if uvm.vpmemMultiMapping { - pmem := newPackedVPMemDevice() - pmem.maxMappedDeviceCount = 1 - - st, err := os.Stat(rootfsFullPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) + if uvm.vpmemMaxCount > 0 { + // Support for VPMem VHD(X) booting rather than initrd.. + kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" + imageFormat := "Vhd1" + if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { + imageFormat = "Vhdx" } - devSize := pageAlign(uint64(st.Size())) - memReg, err := pmem.Allocate(devSize) - if err != nil { - return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ + "0": { + HostPath: rootfsFullPath, + ReadOnly: true, + ImageFormat: imageFormat, + }, } - defer func() { + if uvm.vpmemMultiMapping { + pmem := newPackedVPMemDevice() + pmem.maxMappedDeviceCount = 1 + + st, err := os.Stat(rootfsFullPath) if err != nil { - if err = pmem.Release(memReg); err != nil { - log.G(ctx).WithError(err).Debug("failed to release memory region") - } + return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) } - }() + devSize := pageAlign(uint64(st.Size())) + memReg, err := pmem.Allocate(devSize) + if err != nil { + return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + } + defer func() { + if err != nil { + if err = pmem.Release(memReg); err != nil { + log.G(ctx).WithError(err).Debug("failed to release memory region") + } + } + }() - dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) - if err := pmem.mapVHDLayer(ctx, dev); err != nil { - return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) + if err := pmem.mapVHDLayer(ctx, dev); err != nil { + return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + } + uvm.vpmemDevicesMultiMapped[0] = pmem + } else { + dev := newDefaultVPMemInfo(opts.RootFSFile, "/") + uvm.vpmemDevicesDefault[0] = dev } - uvm.vpmemDevicesMultiMapped[0] = pmem } else { - dev := newDefaultVPMemInfo(opts.RootFSFile, "/") - uvm.vpmemDevicesDefault[0] = dev + kernelArgs = "root=/dev/sda ro rootwait init=/init" + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ + Type_: "VirtualDisk", + Path: rootfsFullPath, + ReadOnly: true, + } + uvm.scsiLocations[0][0] = newSCSIMount(uvm, rootfsFullPath, "/", "VirtualDisk", "", 1, 0, 0, true, false) + } } @@ -737,6 +748,12 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error } }() + // vpmemMaxCount has been set to 0 which means we are going to need multiple SCSI controllers + // to support lots of layers. 
+ if osversion.Build() >= osversion.RS5 && uvm.vpmemMaxCount == 0 { + uvm.scsiControllerCount = 4 + } + if err = verifyOptions(ctx, opts); err != nil { return nil, errors.Wrap(err, errBadUVMOpts.Error()) } diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go index 1d083ffda2..4a92fc962d 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go @@ -17,6 +17,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/uvmfolder" "github.com/Microsoft/hcsshim/internal/wclayer" @@ -249,7 +250,7 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error id: opts.ID, owner: opts.Owner, operatingSystem: "windows", - scsiControllerCount: 1, + scsiControllerCount: opts.SCSIControllerCount, vsmbDirShares: make(map[string]*VSMBShare), vsmbFileShares: make(map[string]*VSMBShare), vpciDevices: make(map[VPCIDeviceKey]*VPCIDevice), @@ -310,21 +311,23 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error } } - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { - Attachments: map[string]hcsschema.Attachment{ - "0": { - Path: scratchPath, - Type_: "VirtualDisk", - }, - }, - }, + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ + Attachments: make(map[string]hcsschema.Attachment), + } + } + + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ + + Path: scratchPath, + Type_: "VirtualDisk", } uvm.scsiLocations[0][0] = newSCSIMount(uvm, - doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path, + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Path, "", - doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Type_, + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Type_, "", 1, 0, diff --git a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go index 5cac727387..78d7516ffa 100644 --- a/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go +++ b/test/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strconv" "strings" "github.com/Microsoft/go-winio/pkg/security" @@ -169,8 +168,8 @@ func newSCSIMount( // SCSI controllers associated with a utility VM to use. 
// Lock must be held when calling this function func (uvm *UtilityVM) allocateSCSISlot(ctx context.Context) (int, int, error) { - for controller, luns := range uvm.scsiLocations { - for lun, sm := range luns { + for controller := 0; controller < int(uvm.scsiControllerCount); controller++ { + for lun, sm := range uvm.scsiLocations[controller] { // If sm is nil, we have found an open slot so we allocate a new SCSIMount if sm == nil { return controller, lun, nil @@ -224,7 +223,7 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { scsiModification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), } var verity *guestresource.DeviceVerityInfo @@ -408,11 +407,6 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) return nil, ErrNoSCSIControllers } - // Note: Can remove this check post-RS5 if multiple controllers are supported - if sm.Controller > 0 { - return nil, ErrTooManyAttachments - } - SCSIModification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeAdd, Settings: hcsschema.Attachment{ @@ -421,7 +415,7 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) ReadOnly: addReq.readOnly, ExtensibleVirtualDiskType: addReq.evdType, }, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), } if sm.UVMPath != "" { @@ -637,7 +631,7 @@ func (sm *SCSIMount) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) er dstVhdPath string = sm.HostPath err error dir string - conStr string = fmt.Sprintf("%d", sm.Controller) + conStr string = guestrequest.ScsiControllerGuids[sm.Controller] lunStr string = fmt.Sprintf("%d", sm.LUN) )
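For reference, the hardcoded ScsiControllerGuids table can be regenerated from the namespace GUID quoted in the types.go comment. A minimal standalone sketch using go-winio's guid package, per the guid.NewV5 call shown in that comment (not part of the change; if the comment is accurate, this should print the four table entries in order):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Namespace GUID from the comment in internal/protocol/guestrequest/types.go.
	ns, err := guid.FromString("d422512d-2bf2-4752-809d-7b82b5fcb1b4")
	if err != nil {
		panic(err)
	}
	// One V5 (SHA-1) GUID per controller index, named "0" through "3",
	// mirroring guid.NewV5("d422512d-...", []byte("0")) from the comment.
	for i := 0; i < 4; i++ {
		g, err := guid.NewV5(ns, []byte(strconv.Itoa(i)))
		if err != nil {
			panic(err)
		}
		fmt.Printf("controller %d: %s\n", i, g)
	}
}
```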