Skip to content

Commit

Permalink
Add GMSA support for V2 HCS schema xenon containers
Browse files Browse the repository at this point in the history
* Add new UVM function 'UpdateHvSockServiceTable' to be able to hot add
Hvsocket service table entries.
* Add disabled field to HvSocketServiceConfig (used to be private in the schema)
* Remove the hardcoded error that was returned when a credential spec was
supplied together with a request for a hypervisor isolated container.
* Misc refactors (comments, style)

Signed-off-by: Daniel Canter <[email protected]>
  • Loading branch information
dcantah committed Aug 5, 2020
1 parent e50252d commit e2566e2
Show file tree
Hide file tree
Showing 7 changed files with 94 additions and 29 deletions.
42 changes: 21 additions & 21 deletions internal/hcsoci/credentials.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,26 +28,27 @@ import (
// setting up instances manually is not needed, the GMSA credential specification
// simply needs to be present in the V1 container document.

// CCGInstance stores the id used when creating a ccg instance. Used when
// CCGResource stores the id used when creating a ccg instance. Used when
// closing a container to be able to release the instance.
type CCGInstance struct {
type CCGResource struct {
	// id is the ID of the container this ccg instance belongs to. It is passed
	// to removeCredentialGuard when the resource is released.
	id string
}

// Release calls into hcs to remove the ccg instance. These do not get cleaned up automatically
// they MUST be explicitly removed with a call to ModifyServiceSettings. The instances will persist
// unless vmcompute.exe exits or they are removed manually as done here.
func (instance *CCGInstance) Release(ctx context.Context) error {
if err := removeCredentialGuard(ctx, instance.id); err != nil {
// Release calls into hcs to remove the ccg instance for the container matching CCGResource.id.
// These do not get cleaned up automatically, they MUST be explicitly removed with a call to
// ModifyServiceSettings. The instances will persist unless vmcompute.exe exits or they are removed
// manually as done here.
func (ccgResource *CCGResource) Release(ctx context.Context) error {
	// Wrap with %w (not %s) so callers can unwrap the underlying HCS error
	// with errors.Is/errors.As.
	if err := removeCredentialGuard(ctx, ccgResource.id); err != nil {
		return fmt.Errorf("failed to remove container credential guard instance: %w", err)
	}
	return nil
}

// CreateCredentialGuard creates a container credential guard instance and
// returns the state object to be placed in a v2 container doc.
func CreateCredentialGuard(ctx context.Context, id, credSpec string, hypervisorIsolated bool) (*hcsschema.ContainerCredentialGuardState, *CCGInstance, error) {
func CreateCredentialGuard(ctx context.Context, id, credSpec string, hypervisorIsolated bool) (*hcsschema.ContainerCredentialGuardInstance, *CCGResource, error) {
log.G(ctx).WithField("containerID", id).Debug("creating container credential guard instance")
// V2 schema ccg setup a little different as its expected to be passed
// through all the way to the gcs. Can no longer be enabled just through
Expand All @@ -56,19 +57,21 @@ func CreateCredentialGuard(ctx context.Context, id, credSpec string, hypervisorI
// 1. Call HcsModifyServiceSettings with a ModificationRequest set with a
// ContainerCredentialGuardAddInstanceRequest. This is where the cred spec
// gets passed in. Transport either "LRPC" (Argon) or "HvSocket" (Xenon).
//
// 2. Query the instance with a call to HcsGetServiceProperties with the
// PropertyType "ContainerCredentialGuard". This will return all instances
//
// 3. Parse for the id of our container to find which one correlates to the
// container we're building the doc for, then add to the V2 doc.
// 4. If xenon container the hvsocketconfig will need to be in the UVMs V2
// schema HcsComputeSystem document before being created/sent to HCS. It must
// be in the doc at creation time as we do not support hot adding hvsocket
// service table entries.
// This is currently a blocker for adding support for hyper-v gmsa.
//
// 4. If xenon container the CCG instance with the Hvsocket service table
// information is expected to be in the Utility VMs doc before being sent
// to HCS for creation. For pod scenarios currently we don't have the OCI
// spec of a container at UVM creation time, therefore the service table entry
// for the CCG instance will have to be hot added.
transport := "LRPC"
if hypervisorIsolated {
// TODO(Dcantah) Set transport to HvSocket here when this is supported
return nil, nil, errors.New("hypervisor isolated containers with v2 HCS schema do not support GMSA")
transport = "HvSocket"
}
req := hcsschema.ModificationRequest{
PropertyType: hcsschema.PTContainerCredentialGuard,
Expand Down Expand Up @@ -102,10 +105,10 @@ func CreateCredentialGuard(ctx context.Context, id, credSpec string, hypervisorI
}
for _, ccgInstance := range ccgSysInfo.Instances {
if ccgInstance.Id == id {
instance := &CCGInstance{
ccgResource := &CCGResource{
id,
}
return ccgInstance.CredentialGuard, instance, nil
return &ccgInstance, ccgResource, nil
}
}
return nil, nil, fmt.Errorf("failed to find credential guard instance with container ID %s", id)
Expand All @@ -124,8 +127,5 @@ func removeCredentialGuard(ctx context.Context, id string) error {
},
},
}
if err := hcs.ModifyServiceSettings(ctx, req); err != nil {
return err
}
return nil
return hcs.ModifyServiceSettings(ctx, req)
}
7 changes: 3 additions & 4 deletions internal/hcsoci/resources.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,15 +92,14 @@ func ReleaseResources(ctx context.Context, r *Resources, vm *uvm.UtilityVM, all
}
r.createdNetNS = false
}
case *CCGInstance:
case *CCGResource:
if err := r.resources[i].Release(ctx); err != nil {
log.G(ctx).WithError(err).Error("failed to release container resource")
releaseErr = true
}
default:
// Don't need to check if vm != nil here anymore as they wouldnt
// have been added in the first place. All resources have embedded
// vm they belong to.
// Don't need to check if vm != nil here as they wouldnt have been added
// in the first place. All resources have embedded vm they belong to.
if all {
if err := r.resources[i].Release(ctx); err != nil {
log.G(ctx).WithError(err).Error("failed to release container resource")
Expand Down
24 changes: 20 additions & 4 deletions internal/hcsoci/resources_wcow.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,13 +131,29 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r
// Only need to create a CCG instance for v2 containers
if schemaversion.IsV21(coi.actualSchemaVersion) {
hypervisorIsolated := coi.HostingSystem != nil
ccgState, ccgInstance, err := CreateCredentialGuard(ctx, coi.actualID, cs, hypervisorIsolated)
ccgInstance, ccgResource, err := CreateCredentialGuard(ctx, coi.actualID, cs, hypervisorIsolated)
if err != nil {
return err
}
coi.ccgState = ccgState
r.resources = append(r.resources, ccgInstance)
//TODO dcantah: If/when dynamic service table entries is supported register the RpcEndpoint with hvsocket here
coi.ccgState = ccgInstance.CredentialGuard
if hypervisorIsolated {
// If hypervisor isolated we need to add an hvsocket service table entry
// By default HVSocket won't allow something inside the VM to connect
// back to a process on the host. We need to update the HVSocket service table
// to allow a connection to CCG.exe on the host, so that GMSA can function.
// We need to hot add this here because at UVM creation time we don't know what containers
// will be launched in the UVM, nonetheless if they will ask for GMSA. This is a workaround
// for the previous design requirement for CCG V2 where the service entry
// must be present in the UVM'S HCS document before being sent over as hot adding
// an HvSocket service was not possible.
hvSockConfig := ccgInstance.HvSocketConfig
hss, err := coi.HostingSystem.UpdateHvSocketService(ctx, hvSockConfig.ServiceId, hvSockConfig.ServiceConfig)
if err != nil {
return fmt.Errorf("failed to update hvsocket service: %s", err)
}
r.resources = append(r.resources, hss)
}
r.resources = append(r.resources, ccgResource)
}
}
return nil
Expand Down
2 changes: 2 additions & 0 deletions internal/schema2/hv_socket_service_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,4 +19,6 @@ type HvSocketServiceConfig struct {

// If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors
AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"`

Disabled bool `json:"Disabled,omitempty"`
}
45 changes: 45 additions & 0 deletions internal/uvm/hvsocket.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
package uvm

import (
"context"
"fmt"

"github.com/Microsoft/hcsshim/internal/requesttype"
hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

// UpdateHvSocketService calls HCS to create or update the hvsocket service entry
// identified by sid in the UVM's service table, using the supplied hvsocket
// service configuration. HCS handles the Create/Update/Delete cases itself based
// on the contents of the request:
//
// 1. If the service ID is not yet present in the VM's service table, an entry is
// created with the descriptors and state (disabled or not) given in doc.
// 2. If the service already exists and doc carries empty descriptors with
// Disabled set, the service is removed.
// 3. Any other combination simply updates the existing service.
//
// Prefer this function for the Create/Update flow; for deletion prefer
// RemoveHvSocketService, which sets the fields HCS expects for the Delete case.
// This is currently only used for updating the service table to allow the UVM to
// communicate with the Container Credential Guard (ccg.exe) process on the host
// after being launched.
func (uvm *UtilityVM) UpdateHvSocketService(ctx context.Context, sid string, doc *hcsschema.HvSocketServiceConfig) (*HvSocketService, error) {
	return uvm.modify(ctx, &hcsschema.ModifySettingRequest{
		RequestType:  requesttype.Update,
		ResourcePath: fmt.Sprintf(hvsocketConfigResourceFormat, sid),
		Settings:     doc,
	})
}

// RemoveHvSocketService removes the hvsocket service table entry for sid from
// the UVM, if one exists. Unlike UpdateHvSocketService, this issues a Remove
// request so HCS takes its dedicated delete path.
func (uvm *UtilityVM) RemoveHvSocketService(ctx context.Context, sid string) error {
	return uvm.modify(ctx, &hcsschema.ModifySettingRequest{
		RequestType:  requesttype.Remove,
		ResourcePath: fmt.Sprintf(hvsocketConfigResourceFormat, sid),
	})
}
1 change: 1 addition & 0 deletions internal/uvm/resourcepaths.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,5 @@ const (
vPMemControllerResourceFormat string = "VirtualMachine/Devices/VirtualPMem/Devices/%d"
vPMemDeviceResourceFormat string = "VirtualMachine/Devices/VirtualPMem/Devices/%d/Mappings/%d"
vSmbShareResourcePath string = "VirtualMachine/Devices/VirtualSmb/Shares"
hvsocketConfigResourceFormat string = "VirtualMachine/Devices/HvSocket/HvSocketConfig/ServiceTable/%s"
)
2 changes: 2 additions & 0 deletions internal/uvm/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,8 @@ type UtilityVM struct {
vpmemMaxCount uint32 // The max number of VPMem devices.
vpmemMaxSizeBytes uint64 // The max size of the layer in bytes per vPMem device.

hvsocketServices map[string]*HvSocketService // map of HvSocket service ID to HvSocketService

// SCSI devices that are mapped into a Windows or Linux utility VM
scsiLocations [4][64]*SCSIMount // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though.
scsiControllerCount uint32 // Number of SCSI controllers in the utility VM
Expand Down

0 comments on commit e2566e2

Please sign in to comment.