Migrate container configuration in portlayer in memory #4146

Merged · 9 commits · Mar 8, 2017
14 changes: 12 additions & 2 deletions lib/apiservers/engine/backends/container.go
@@ -481,8 +481,18 @@ func (c *Container) ContainerRm(name string, config *types.ContainerRmConfig) er

// Use the force and stop the container first
secs := 0

if config.ForceRemove {
c.containerProxy.Stop(vc, name, &secs, true)
} else {
state, err := c.containerProxy.State(vc)
if err != nil {
return err
}
// force stop if the container state is error, to make sure the container is deletable later
if state.Status == ContainerError {
c.containerProxy.Stop(vc, name, &secs, true)
}
}

// call the remove directly on the name; no need for a handle
@@ -536,12 +546,12 @@ func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error {
// port bindings were cleaned up by another operation.
continue
}
running, err := c.containerProxy.IsRunning(cc)
state, err := c.containerProxy.State(cc)
if err != nil {
return fmt.Errorf("Failed to get container %q power state: %s",
mappedCtr, err)
}
if running {
if state.Running {
log.Debugf("Running container %q still holds port %s", mappedCtr, hPort)
continue
}
40 changes: 22 additions & 18 deletions lib/apiservers/engine/backends/container_proxy.go
@@ -87,7 +87,7 @@ type VicContainerProxy interface {
StreamContainerLogs(name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error

Stop(vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error
IsRunning(vc *viccontainer.VicContainer) (bool, error)
State(vc *viccontainer.VicContainer) (*types.ContainerState, error)
Wait(vc *viccontainer.VicContainer, timeout time.Duration) (exitCode int32, processStatus string, containerState string, reterr error)
Signal(vc *viccontainer.VicContainer, sig uint64) error
Resize(vc *viccontainer.VicContainer, height, width int32) error
@@ -123,6 +123,11 @@ const (
DriverArgFlagKey = "flags"
DriverArgContainerKey = "container"
DriverArgImageKey = "image"

ContainerRunning = "running"
ContainerError = "error"
ContainerStopped = "stopped"
ContainerExited = "exited"
)

// NewContainerProxy creates a new ContainerProxy
@@ -477,12 +482,13 @@ func (c *ContainerProxy) Stop(vc *viccontainer.VicContainer, name string, second
// we have a container on the PL side, let's check the state before proceeding
// ignore the error since others will be checking below; this is an attempt to short-circuit the op
// TODO: can be replaced with simple cache check once power events are propagated to persona
running, err := c.IsRunning(vc)
state, err := c.State(vc)
if err != nil && IsNotFoundError(err) {
cache.ContainerCache().DeleteContainer(vc.ContainerID)
return err
}
if !running {
// attempt to stop the container if it is running or in the error state
if err == nil && !state.Running && state.Status != ContainerError {
return nil
}

@@ -537,33 +543,31 @@ func (c *ContainerProxy) Stop(vc *viccontainer.VicContainer, name string, second
return nil
}

// IsRunning returns true if the given container is running
func (c *ContainerProxy) IsRunning(vc *viccontainer.VicContainer) (bool, error) {
// State returns container state
func (c *ContainerProxy) State(vc *viccontainer.VicContainer) (*types.ContainerState, error) {
defer trace.End(trace.Begin(""))

if c.client == nil {
return false, InternalServerError("ContainerProxy.IsRunning failed to get a portlayer client")
return nil, InternalServerError("ContainerProxy.State failed to get a portlayer client")
}

results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID))
if err != nil {
switch err := err.(type) {
case *containers.GetContainerInfoNotFound:
return false, NotFoundError(fmt.Sprintf("No such container: %s", vc.ContainerID))
return nil, NotFoundError(vc.Name)
case *containers.GetContainerInfoInternalServerError:
return false, InternalServerError(err.Payload.Message)
return nil, InternalServerError(err.Payload.Message)
default:
return false, InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err))
return nil, InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err))
}
}

inspectJSON, err := ContainerInfoToDockerContainerInspect(vc, results.Payload, c.portlayerName)
if err != nil {
log.Errorf("containerInfoToDockerContainerInspect failed with %s", err)
return false, err
return nil, err
}

return inspectJSON.State.Running, nil
return inspectJSON.State, nil
}

func (c *ContainerProxy) Wait(vc *viccontainer.VicContainer, timeout time.Duration) (
@@ -626,7 +630,7 @@ func (c *ContainerProxy) Signal(vc *viccontainer.VicContainer, sig uint64) error
return InternalServerError("Signal failed to create a portlayer client")
}

if running, err := c.IsRunning(vc); !running && err == nil {
if state, err := c.State(vc); err == nil && !state.Running {
return fmt.Errorf("%s is not running", vc.ContainerID)
}

@@ -646,7 +650,7 @@ func (c *ContainerProxy) Signal(vc *viccontainer.VicContainer, sig uint64) error
}
}

if running, err := c.IsRunning(vc); !running && err == nil {
if state, err := c.State(vc); err == nil && !state.Running {
// unmap ports
if err = UnmapPorts(vc.HostConfig); err != nil {
return err
@@ -1099,10 +1103,10 @@ func ContainerInfoToDockerContainerInspect(vc *viccontainer.VicContainer, info *
containerState.Status = strings.ToLower(info.ContainerConfig.State)

// https://github.com/docker/docker/blob/master/container/state.go#L77
if containerState.Status == "stopped" {
containerState.Status = "exited"
if containerState.Status == ContainerStopped {
containerState.Status = ContainerExited
}
if containerState.Status == "running" {
if containerState.Status == ContainerRunning {
containerState.Running = true
}

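The new State method replaces the boolean IsRunning for every caller in this package. Below is a minimal caller sketch of the new contract, assuming it lives alongside these methods in the backends package; the stopIfBroken name is illustrative, not part of the change. On success state is non-nil, errors may be NotFound or InternalServerError, and Status uses the string constants added near the top of this file:

// stopIfBroken stops a container that is either running or stuck in the
// error state, mirroring the ContainerRm logic above.
func stopIfBroken(p VicContainerProxy, vc *viccontainer.VicContainer, name string) error {
	state, err := p.State(vc)
	if err != nil {
		// NotFound means the container is already gone; nothing to stop
		return err
	}
	secs := 0
	if state.Running || state.Status == ContainerError {
		// force stop broken containers so they remain deletable later
		return p.Stop(vc, name, &secs, true)
	}
	return nil
}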
13 changes: 8 additions & 5 deletions lib/apiservers/engine/backends/container_test.go
@@ -24,12 +24,12 @@ import (

"context"

"github.com/docker/docker/api/types/backend"
derr "github.com/docker/docker/api/errors"
"github.com/docker/docker/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
dnetwork "github.com/docker/docker/api/types/network"
"github.com/docker/docker/reference"
"github.com/docker/go-connections/nat"
"github.com/stretchr/testify/assert"
"github.com/vishvananda/netlink"
@@ -294,16 +294,19 @@ func (m *MockContainerProxy) Stop(vc *viccontainer.VicContainer, name string, se
return nil
}

func (m *MockContainerProxy) IsRunning(vc *viccontainer.VicContainer) (bool, error) {
func (m *MockContainerProxy) State(vc *viccontainer.VicContainer) (*types.ContainerState, error) {
// Assume the container is running if it is in the cache. If we need other
// conditions in the future, we can add them, but for now, just assume running.
c := cache.ContainerCache().GetContainer(vc.ContainerID)

if c == nil {
return false, nil
return nil, nil
}

return true, nil
state := &types.ContainerState{
Running: true,
}
return state, nil
}

func (m *MockContainerProxy) Wait(vc *viccontainer.VicContainer, timeout time.Duration) (exitCode int32, processStatus string, containerState string, reterr error) {
@@ -361,9 +361,8 @@ func (handler *ContainersHandlersImpl) GetContainerLogsHandler(params containers
return containers.NewGetContainerLogsInternalServerError()
}

// containers with PluginVersion > 0 will use updated output logging on the backend
// containers with ExecConfig.Version = nil are assumed to be PluginVersion 0
if container.ExecConfig.Version != nil && container.ExecConfig.Version.PluginVersion > 0 {
// containers with DataVersion > 0 will use updated output logging on the backend
if container.DataVersion > 0 {
// wrap the reader in a LogReader to deserialize persisted containerVM output
reader = iolog.NewLogReader(reader)
}
@@ -430,8 +429,15 @@ func convertContainerToContainerInfo(container *exec.ContainerInfo) *models.Cont
ccid := container.ExecConfig.ID
info.ContainerConfig.ContainerID = ccid

s := container.State().String()
info.ContainerConfig.State = s
var state string
if container.MigrationError != nil {
state = "error"
info.ProcessConfig.ErrorMsg = fmt.Sprintf("Migration failed: %s", container.MigrationError.Error())
info.ProcessConfig.Status = state
} else {
state = container.State().String()
}
info.ContainerConfig.State = state
info.ContainerConfig.LayerID = container.ExecConfig.LayerID
info.ContainerConfig.RepoName = &container.ExecConfig.RepoName
info.ContainerConfig.CreateTime = container.ExecConfig.CreateTime
2 changes: 1 addition & 1 deletion lib/config/executor/container_vm.go
@@ -34,7 +34,7 @@ type Common struct {
// A reference to the components hosting execution environment, if any
ExecutionEnvironment string

// Unambiguous ID with meaning in the context of its hosting execution environment
// Unambiguous ID with meaning in the context of its hosting execution environment. Changing this definition will break container backward compatibility, so please don't change it.
ID string `vic:"0.1" scope:"read-only" key:"id"`

// Convenience field to record a human readable name
9 changes: 7 additions & 2 deletions lib/migration/migrator.go
@@ -33,7 +33,7 @@ import (
//
// Note: Input map conf is VCH appliance guestinfo map, and returned map is the new guestinfo.
// Returns false without error when there is no need to migrate; the returned map is then a copy of the input map.
// If there is error returned, returned map might have half-migrated value
// If an error is returned, the migrated flag is true and the returned map may contain half-migrated values
func MigrateApplianceConfig(ctx context.Context, s *session.Session, conf map[string]string) (map[string]string, bool, error) {
return migrateConfig(ctx, s, conf, manager.ApplianceConfigure, manager.ApplianceVersionKey)
}
@@ -42,7 +42,7 @@ func MigrateApplianceConfig(ctx context.Context, s *session.Session, conf map[st
//
// Note: Migrated data will be returned in map, and input object is not changed.
// Returns false without error when there is no need to migrate; the returned map is then a copy of the input map.
// If there is error returned, returned map might have half-migrated value.
// If an error is returned, the migrated flag is true and the returned map may contain half-migrated values
func MigrateContainerConfig(conf map[string]string) (map[string]string, bool, error) {
return migrateConfig(nil, nil, conf, manager.ContainerConfigure, manager.ContainerVersionKey)
}
@@ -57,6 +57,11 @@ func ApplianceDataIsOlder(conf map[string]string) (bool, error) {
return dataIsOlder(conf, manager.ApplianceConfigure, manager.ApplianceVersionKey)
}

// ContainerDataVersion returns container data version
func ContainerDataVersion(conf map[string]string) (int, error) {
return getCurrentID(conf, manager.ContainerVersionKey)
}

// dataIsOlder returns true if the data is older than the latest version; on error it always returns false
func dataIsOlder(data map[string]string, target string, verKey string) (bool, error) {
defer trace.End(trace.Begin(fmt.Sprintf("target: %s, version key: %s", target, verKey)))
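A minimal usage sketch for the two container helpers above; the loadContainerConfig wrapper and its logging are illustrative, and conf is assumed to be the raw guestinfo key/value map read from a containerVM:

package example

import (
	"log"

	"github.com/vmware/vic/lib/migration"
)

// loadContainerConfig migrates a guestinfo map in memory and reports whether
// anything changed. MigrateContainerConfig may return a half-migrated map
// alongside an error, so this sketch discards it rather than persisting it.
func loadContainerConfig(conf map[string]string) (map[string]string, error) {
	version, err := migration.ContainerDataVersion(conf)
	if err != nil {
		log.Printf("cannot determine container data version: %s", err)
	}
	migrated, changed, err := migration.MigrateContainerConfig(conf)
	if err != nil {
		return nil, err
	}
	if changed {
		log.Printf("migrated container data from version %d", version)
	}
	return migrated, nil
}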
6 changes: 6 additions & 0 deletions lib/portlayer/attach/attach.go
@@ -147,6 +147,9 @@ func Bind(h interface{}) (interface{}, error) {
if !ok {
return nil, fmt.Errorf("Type assertion failed for %#+v", handle)
}
if handle.MigrationError != nil {
return nil, fmt.Errorf("Migration failed: %s", handle.MigrationError)
}
return toggle(handle, true)
}

@@ -158,5 +161,8 @@ func Unbind(h interface{}) (interface{}, error) {
if !ok {
return nil, fmt.Errorf("Type assertion failed for %#+v", handle)
}
if handle.MigrationError != nil {
return nil, fmt.Errorf("Migration failed: %s", handle.MigrationError)
}
return toggle(handle, false)
}
20 changes: 17 additions & 3 deletions lib/portlayer/exec/base.go
@@ -27,6 +27,7 @@ import (
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"github.com/vmware/vic/lib/config/executor"
"github.com/vmware/vic/lib/migration"
"github.com/vmware/vic/pkg/trace"
"github.com/vmware/vic/pkg/vsphere/extraconfig"
"github.com/vmware/vic/pkg/vsphere/extraconfig/vmomi"
@@ -52,6 +53,12 @@ func (e NotYetExistError) Error() string {
type containerBase struct {
ExecConfig *executor.ExecutorConfig

// Migrated records whether in-memory migration changed this configuration; a migrated config is not written back to the containerVM during the commit phase
Migrated bool
// MigrationError holds any error that occurred during data migration; some operations may fail because we cannot extract the whole container configuration
MigrationError error
DataVersion int

// original - can be pointers so long as refreshes
// use different instances of the structures
Config *types.VirtualMachineConfigInfo
@@ -71,8 +78,11 @@ func newBase(vm *vm.VirtualMachine, c *types.VirtualMachineConfigInfo, r *types.

// construct a working copy of the exec config
if c != nil && c.ExtraConfig != nil {
src := vmomi.OptionValueSource(c.ExtraConfig)
extraconfig.Decode(src, base.ExecConfig)
var migratedConf map[string]string
containerExecKeyValues := vmomi.OptionValueMap(c.ExtraConfig)
base.DataVersion, _ = migration.ContainerDataVersion(containerExecKeyValues)
migratedConf, base.Migrated, base.MigrationError = migration.MigrateContainerConfig(containerExecKeyValues)
extraconfig.Decode(extraconfig.MapSource(migratedConf), base.ExecConfig)
}

return base
@@ -116,7 +126,11 @@ func (c *containerBase) updates(ctx context.Context) (*containerBase, error) {
}

// Get the ExtraConfig
extraconfig.Decode(vmomi.OptionValueSource(o.Config.ExtraConfig), base.ExecConfig)
var migratedConf map[string]string
containerExecKeyValues := vmomi.OptionValueMap(o.Config.ExtraConfig)
base.DataVersion, _ = migration.ContainerDataVersion(containerExecKeyValues)
migratedConf, base.Migrated, base.MigrationError = migration.MigrateContainerConfig(containerExecKeyValues)
extraconfig.Decode(extraconfig.MapSource(migratedConf), base.ExecConfig)

return base, nil
}
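newBase and updates now run the same four-step decode sequence. A compact sketch of that shared flow, assuming it sits in the exec package next to containerBase and uses the imports already present in base.go; the decodeWithMigration helper is illustrative — the PR inlines this logic at both call sites:

// decodeWithMigration converts a VM's ExtraConfig into a key/value map,
// records the container's data version, migrates the map in memory, and
// decodes the (possibly migrated) result into the working ExecConfig copy.
func decodeWithMigration(ec []types.BaseOptionValue, base *containerBase) {
	kv := vmomi.OptionValueMap(ec)
	base.DataVersion, _ = migration.ContainerDataVersion(kv)

	var migrated map[string]string
	migrated, base.Migrated, base.MigrationError = migration.MigrateContainerConfig(kv)

	// on failure the map may be half-migrated; MigrationError stays on the
	// handle so attach Bind/Unbind and Commit can refuse to proceed
	extraconfig.Decode(extraconfig.MapSource(migrated), base.ExecConfig)
}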
6 changes: 6 additions & 0 deletions lib/portlayer/exec/commit.go
@@ -146,6 +146,12 @@ func Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int
log.Debugf("Nilifying ExtraConfig as we are running")
s.ExtraConfig = nil
}
// nilify ExtraConfig if the container configuration was migrated
// in this case the VCH and the container are at different versions; the migrated configuration must not be written back to the old container, to avoid data loss in the old version's container
if h.Migrated {
Review comment (Contributor): We are setting ExtraConfig to nil if we are powered on so why do we need this?

Reply (Author): This is because the configuration data is migrated. If it were written back to the container's vmx file, the old binary could not read the new data. We talked about that in a meeting, remember?
log.Debugf("Nilifying ExtraConfig as configuration of container %s is migrated", h.ExecConfig.ID)
s.ExtraConfig = nil
}

_, err := h.vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
return h.vm.Reconfigure(ctx, *s)