diff --git a/lib/apiservers/engine/backends/container.go b/lib/apiservers/engine/backends/container.go index a00f1d7aa4..c102af4b47 100644 --- a/lib/apiservers/engine/backends/container.go +++ b/lib/apiservers/engine/backends/container.go @@ -481,8 +481,18 @@ func (c *Container) ContainerRm(name string, config *types.ContainerRmConfig) er // Use the force and stop the container first secs := 0 + if config.ForceRemove { c.containerProxy.Stop(vc, name, &secs, true) + } else { + state, err := c.containerProxy.State(vc) + if err != nil { + return err + } + // force stop if container state is error to make sure container is deletable later + if state.Status == ContainerError { + c.containerProxy.Stop(vc, name, &secs, true) + } } //call the remove directly on the name. No need for using a handle. @@ -536,12 +546,12 @@ func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error { // port bindings were cleaned up by another operation. continue } - running, err := c.containerProxy.IsRunning(cc) + state, err := c.containerProxy.State(cc) if err != nil { return fmt.Errorf("Failed to get container %q power state: %s", mappedCtr, err) } - if running { + if state.Running { log.Debugf("Running container %q still holds port %s", mappedCtr, hPort) continue } diff --git a/lib/apiservers/engine/backends/container_proxy.go b/lib/apiservers/engine/backends/container_proxy.go index bbec563dda..28280b46d5 100644 --- a/lib/apiservers/engine/backends/container_proxy.go +++ b/lib/apiservers/engine/backends/container_proxy.go @@ -87,7 +87,7 @@ type VicContainerProxy interface { StreamContainerLogs(name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error Stop(vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error - IsRunning(vc *viccontainer.VicContainer) (bool, error) + State(vc *viccontainer.VicContainer) (*types.ContainerState, error) Wait(vc *viccontainer.VicContainer, timeout 
time.Duration) (exitCode int32, processStatus string, containerState string, reterr error) Signal(vc *viccontainer.VicContainer, sig uint64) error Resize(vc *viccontainer.VicContainer, height, width int32) error @@ -123,6 +123,11 @@ const ( DriverArgFlagKey = "flags" DriverArgContainerKey = "container" DriverArgImageKey = "image" + + ContainerRunning = "running" + ContainerError = "error" + ContainerStopped = "stopped" + ContainerExited = "exited" ) // NewContainerProxy creates a new ContainerProxy @@ -477,12 +482,13 @@ func (c *ContainerProxy) Stop(vc *viccontainer.VicContainer, name string, second // we have a container on the PL side lets check the state before proceeding // ignore the error since others will be checking below..this is an attempt to short circuit the op // TODO: can be replaced with simple cache check once power events are propagated to persona - running, err := c.IsRunning(vc) + state, err := c.State(vc) if err != nil && IsNotFoundError(err) { cache.ContainerCache().DeleteContainer(vc.ContainerID) return err } - if !running { + // attempt to stop container if status is running or broken + if !state.Running && state.Status != ContainerError { return nil } @@ -537,33 +543,31 @@ func (c *ContainerProxy) Stop(vc *viccontainer.VicContainer, name string, second return nil } -// IsRunning returns true if the given container is running -func (c *ContainerProxy) IsRunning(vc *viccontainer.VicContainer) (bool, error) { +// State returns container state +func (c *ContainerProxy) State(vc *viccontainer.VicContainer) (*types.ContainerState, error) { defer trace.End(trace.Begin("")) if c.client == nil { - return false, InternalServerError("ContainerProxy.IsRunning failed to get a portlayer client") + return nil, InternalServerError("ContainerProxy.State failed to get a portlayer client") } results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID)) if err != nil { switch err := err.(type) 
{ case *containers.GetContainerInfoNotFound: - return false, NotFoundError(fmt.Sprintf("No such container: %s", vc.ContainerID)) + return nil, NotFoundError(vc.Name) case *containers.GetContainerInfoInternalServerError: - return false, InternalServerError(err.Payload.Message) + return nil, InternalServerError(err.Payload.Message) default: - return false, InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) + return nil, InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) } } inspectJSON, err := ContainerInfoToDockerContainerInspect(vc, results.Payload, c.portlayerName) if err != nil { - log.Errorf("containerInfoToDockerContainerInspect failed with %s", err) - return false, err + return nil, err } - - return inspectJSON.State.Running, nil + return inspectJSON.State, nil } func (c *ContainerProxy) Wait(vc *viccontainer.VicContainer, timeout time.Duration) ( @@ -626,7 +630,7 @@ func (c *ContainerProxy) Signal(vc *viccontainer.VicContainer, sig uint64) error return InternalServerError("Signal failed to create a portlayer client") } - if running, err := c.IsRunning(vc); !running && err == nil { + if state, err := c.State(vc); !state.Running && err == nil { return fmt.Errorf("%s is not running", vc.ContainerID) } @@ -646,7 +650,7 @@ func (c *ContainerProxy) Signal(vc *viccontainer.VicContainer, sig uint64) error } } - if running, err := c.IsRunning(vc); !running && err == nil { + if state, err := c.State(vc); !state.Running && err == nil { // unmap ports if err = UnmapPorts(vc.HostConfig); err != nil { return err @@ -1099,10 +1103,10 @@ func ContainerInfoToDockerContainerInspect(vc *viccontainer.VicContainer, info * containerState.Status = strings.ToLower(info.ContainerConfig.State) // https://github.com/docker/docker/blob/master/container/state.go#L77 - if containerState.Status == "stopped" { - containerState.Status = "exited" + if containerState.Status == ContainerStopped { + 
containerState.Status = ContainerExited } - if containerState.Status == "running" { + if containerState.Status == ContainerRunning { containerState.Running = true } diff --git a/lib/apiservers/engine/backends/container_test.go b/lib/apiservers/engine/backends/container_test.go index 39507d09a9..92a5894df7 100644 --- a/lib/apiservers/engine/backends/container_test.go +++ b/lib/apiservers/engine/backends/container_test.go @@ -24,12 +24,12 @@ import ( "context" - "github.com/docker/docker/api/types/backend" derr "github.com/docker/docker/api/errors" - "github.com/docker/docker/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" dnetwork "github.com/docker/docker/api/types/network" + "github.com/docker/docker/reference" "github.com/docker/go-connections/nat" "github.com/stretchr/testify/assert" "github.com/vishvananda/netlink" @@ -294,16 +294,19 @@ func (m *MockContainerProxy) Stop(vc *viccontainer.VicContainer, name string, se return nil } -func (m *MockContainerProxy) IsRunning(vc *viccontainer.VicContainer) (bool, error) { +func (m *MockContainerProxy) State(vc *viccontainer.VicContainer) (*types.ContainerState, error) { // Assume container is running if container in cache. If we need other conditions // in the future, we can add it, but for now, just assume running. 
c := cache.ContainerCache().GetContainer(vc.ContainerID) if c == nil { - return false, nil + return nil, nil } - return true, nil + state := &types.ContainerState{ + Running: true, + } + return state, nil } func (m *MockContainerProxy) Wait(vc *viccontainer.VicContainer, timeout time.Duration) (exitCode int32, processStatus string, containerState string, reterr error) { diff --git a/lib/apiservers/portlayer/restapi/handlers/containers_handlers.go b/lib/apiservers/portlayer/restapi/handlers/containers_handlers.go index 2393156843..a67f43a81d 100644 --- a/lib/apiservers/portlayer/restapi/handlers/containers_handlers.go +++ b/lib/apiservers/portlayer/restapi/handlers/containers_handlers.go @@ -361,9 +361,8 @@ func (handler *ContainersHandlersImpl) GetContainerLogsHandler(params containers return containers.NewGetContainerLogsInternalServerError() } - // containers with PluginVersion > 0 will use updated output logging on the backend - // containers with ExecConfig.Version = nil are assumed to be PluginVersion 0 - if container.ExecConfig.Version != nil && container.ExecConfig.Version.PluginVersion > 0 { + // containers with DataVersion > 0 will use updated output logging on the backend + if container.DataVersion > 0 { // wrap the reader in a LogReader to deserialize persisted containerVM output reader = iolog.NewLogReader(reader) } @@ -430,8 +429,15 @@ func convertContainerToContainerInfo(container *exec.ContainerInfo) *models.Cont ccid := container.ExecConfig.ID info.ContainerConfig.ContainerID = ccid - s := container.State().String() - info.ContainerConfig.State = s + var state string + if container.MigrationError != nil { + state = "error" + info.ProcessConfig.ErrorMsg = fmt.Sprintf("Migration failed: %s", container.MigrationError.Error()) + info.ProcessConfig.Status = state + } else { + state = container.State().String() + } + info.ContainerConfig.State = state info.ContainerConfig.LayerID = container.ExecConfig.LayerID info.ContainerConfig.RepoName = 
&container.ExecConfig.RepoName info.ContainerConfig.CreateTime = container.ExecConfig.CreateTime diff --git a/lib/config/executor/container_vm.go b/lib/config/executor/container_vm.go index 820614f542..fe1c657aac 100644 --- a/lib/config/executor/container_vm.go +++ b/lib/config/executor/container_vm.go @@ -34,7 +34,7 @@ type Common struct { // A reference to the components hosting execution environment, if any ExecutionEnvironment string - // Unambiguous ID with meaning in the context of its hosting execution environment + // Unambiguous ID with meaning in the context of its hosting execution environment. Changing this definition will cause container backward compatibility issue. Please don't change this. ID string `vic:"0.1" scope:"read-only" key:"id"` // Convenience field to record a human readable name diff --git a/lib/migration/migrator.go b/lib/migration/migrator.go index 035c8fbdb6..45cc36c173 100644 --- a/lib/migration/migrator.go +++ b/lib/migration/migrator.go @@ -33,7 +33,7 @@ import ( // // Note: Input map conf is VCH appliance guestinfo map, and returned map is the new guestinfo. // Returns false without error means no need to migrate, and returned map is the copy of input map -// If there is error returned, returned map might have half-migrated value +// If there is error returned, returns true and half-migrated value func MigrateApplianceConfig(ctx context.Context, s *session.Session, conf map[string]string) (map[string]string, bool, error) { return migrateConfig(ctx, s, conf, manager.ApplianceConfigure, manager.ApplianceVersionKey) } @@ -42,7 +42,7 @@ func MigrateApplianceConfig(ctx context.Context, s *session.Session, conf map[st // // Note: Migrated data will be returned in map, and input object is not changed. // Returns false without error means no need to migrate, and returned map is the copy of input map -// If there is error returned, returned map might have half-migrated value. 
+// If an error is returned, it returns true and a half-migrated value
"github.com/vmware/vic/pkg/trace" "github.com/vmware/vic/pkg/vsphere/extraconfig" "github.com/vmware/vic/pkg/vsphere/extraconfig/vmomi" @@ -52,6 +53,12 @@ func (e NotYetExistError) Error() string { type containerBase struct { ExecConfig *executor.ExecutorConfig + // Migrated is used during in memory migration to assign whether an execConfig is viable for a commit phase + Migrated bool + // MigrationError means the errors happens during data migration, some operation might fail for we cannot extract the whole container configuration + MigrationError error + DataVersion int + // original - can be pointers so long as refreshes // use different instances of the structures Config *types.VirtualMachineConfigInfo @@ -71,8 +78,11 @@ func newBase(vm *vm.VirtualMachine, c *types.VirtualMachineConfigInfo, r *types. // construct a working copy of the exec config if c != nil && c.ExtraConfig != nil { - src := vmomi.OptionValueSource(c.ExtraConfig) - extraconfig.Decode(src, base.ExecConfig) + var migratedConf map[string]string + containerExecKeyValues := vmomi.OptionValueMap(c.ExtraConfig) + base.DataVersion, _ = migration.ContainerDataVersion(containerExecKeyValues) + migratedConf, base.Migrated, base.MigrationError = migration.MigrateContainerConfig(containerExecKeyValues) + extraconfig.Decode(extraconfig.MapSource(migratedConf), base.ExecConfig) } return base @@ -116,7 +126,11 @@ func (c *containerBase) updates(ctx context.Context) (*containerBase, error) { } // Get the ExtraConfig - extraconfig.Decode(vmomi.OptionValueSource(o.Config.ExtraConfig), base.ExecConfig) + var migratedConf map[string]string + containerExecKeyValues := vmomi.OptionValueMap(o.Config.ExtraConfig) + base.DataVersion, _ = migration.ContainerDataVersion(containerExecKeyValues) + migratedConf, base.Migrated, base.MigrationError = migration.MigrateContainerConfig(containerExecKeyValues) + extraconfig.Decode(extraconfig.MapSource(migratedConf), base.ExecConfig) return base, nil } diff --git 
a/lib/portlayer/exec/commit.go b/lib/portlayer/exec/commit.go index 99940f06c9..8abfcb2b14 100644 --- a/lib/portlayer/exec/commit.go +++ b/lib/portlayer/exec/commit.go @@ -146,6 +146,12 @@ func Commit(ctx context.Context, sess *session.Session, h *Handle, waitTime *int log.Debugf("Nilifying ExtraConfig as we are running") s.ExtraConfig = nil } + // nilify ExtraConfig if container configuration is migrated + // in this case, VCH and container are in different version. Migrated configuration cannot be written back to old container, to avoid data loss in old version's container + if h.Migrated { + log.Debugf("Nilifying ExtraConfig as configuration of container %s is migrated", h.ExecConfig.ID) + s.ExtraConfig = nil + } _, err := h.vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) { return h.vm.Reconfigure(ctx, *s)