From 4be76e996966728ec77318be4e6bfe61677bf673 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Thu, 20 Dec 2018 11:19:08 +0100 Subject: [PATCH 01/13] virtcontainers: Initial Store implementation Store is a replacement for the current resource storage virtcontainers implementation and the Manager is the front-end for it. The back-ends will provide actual storage capabilities and the first one will be the filesystem one, for storing virtcontainers Items on a local filesystem. The main design goals for Store are the following ones: - Simplicity: The API should be short and simple. - Transparency: The core virtcontainers code should not care about the storage backend details. - Extensibility: It should be easily extensible to add non local and in memory backends. Manger provides a very short and simple API for the rest of the virtcontainers code base to consume: New: Creates a new Store, if needed. Load: Loads an Item from a Store Store: Stores an Item into a Store. Signed-off-by: Samuel Ortiz --- virtcontainers/store/manager.go | 156 +++++++++++++++++++++++++++ virtcontainers/store/manager_test.go | 21 ++++ 2 files changed, 177 insertions(+) create mode 100644 virtcontainers/store/manager.go create mode 100644 virtcontainers/store/manager_test.go diff --git a/virtcontainers/store/manager.go b/virtcontainers/store/manager.go new file mode 100644 index 0000000000..108603f8f7 --- /dev/null +++ b/virtcontainers/store/manager.go @@ -0,0 +1,156 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + "net/url" + "sync" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/sirupsen/logrus" +) + +// Item represents a virtcontainers items that will be managed through the store. +type Item uint8 + +const ( + // Configuration represents a configuration item to be stored + Configuration Item = iota + + // State represents a state item to be stored. 
+ State + + // Network represents a networking item to be stored. + Network + + // Hypervisor represents an hypervisor item to be stored. + Hypervisor + + // Agent represents a agent item to be stored. + Agent + + // Process represents a container process item to be stored. + Process + + // Lock represents a lock item to be stored. + Lock + + // Mounts represents a set of mounts related item to be stored. + Mounts + + // Devices represents a set of devices related item to be stored. + Devices + + // DeviceIDs represents a set of reference IDs item to be stored. + DeviceIDs +) + +func (i Item) String() string { + switch i { + case Configuration: + return "Configuration" + case State: + return "State" + case Network: + return "Network" + case Hypervisor: + return "Hypervisor" + case Agent: + return "Agent" + case Process: + return "Process" + case Lock: + return "Lock" + case Mounts: + return "Mounts" + case Devices: + return "Devices" + case DeviceIDs: + return "Device IDs" + } + + return "" +} + +// Store is an opaque structure representing a virtcontainers Store. +type Store struct { + sync.RWMutex + ctx context.Context + + url string + scheme string + path string + host string +} + +// New will return a new virtcontainers Store. +// If there is already a Store for the URL, we will re-use it. +// Otherwise a new Store is created. 
+func New(ctx context.Context, storeURL string) (*Store, error) { + u, err := url.Parse(storeURL) + if err != nil { + return nil, err + } + + return &Store{ + ctx: ctx, + url: storeURL, + scheme: u.Scheme, + path: u.Path, + host: u.Host, + }, nil +} + +var storeLog = logrus.WithField("source", "virtcontainers/store") + +// Logger returns a logrus logger appropriate for logging Store messages +func (s *Store) Logger() *logrus.Entry { + return storeLog.WithFields(logrus.Fields{ + "subsystem": "store", + "path": s.path, + }) +} + +func (s *Store) trace(name string) (opentracing.Span, context.Context) { + if s.ctx == nil { + s.Logger().WithField("type", "bug").Error("trace called before context set") + s.ctx = context.Background() + } + + span, ctx := opentracing.StartSpanFromContext(s.ctx, name) + + span.SetTag("subsystem", "store") + span.SetTag("path", s.path) + + return span, ctx +} + +// Load loads a virtcontainers item from a Store. +func (s *Store) Load(item Item, data interface{}) error { + span, _ := s.trace("Load") + defer span.Finish() + + span.SetTag("item", item) + + s.RLock() + defer s.RUnlock() + + return nil +} + +// Store stores a virtcontainers item into a Store. 
+func (s *Store) Store(item Item, data interface{}) error { + span, _ := s.trace("Store") + defer span.Finish() + + span.SetTag("item", item) + + s.Lock() + defer s.Unlock() + + return nil +} diff --git a/virtcontainers/store/manager_test.go b/virtcontainers/store/manager_test.go new file mode 100644 index 0000000000..2e48783776 --- /dev/null +++ b/virtcontainers/store/manager_test.go @@ -0,0 +1,21 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewStore(t *testing.T) { + s, err := New(context.Background(), "file:///root/") + assert.Nil(t, err) + assert.Equal(t, s.scheme, "file") + assert.Equal(t, s.host, "") + assert.Equal(t, s.path, "/root/") +} From efd50ecac973e6af94f898de6eb647ef1ce0c01c Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Thu, 20 Dec 2018 12:54:34 +0100 Subject: [PATCH 02/13] virtcontainers: Add a Store manager Each virtcontainers module/component should be able to get a handler on a Store for loading component specific items. The Store manager is an internal Store layer for tracking all created Stores. 
Signed-off-by: Samuel Ortiz --- virtcontainers/store/manager.go | 43 +++++++++++++++++ virtcontainers/store/manager_test.go | 69 +++++++++++++++++++++++++++- 2 files changed, 110 insertions(+), 2 deletions(-) diff --git a/virtcontainers/store/manager.go b/virtcontainers/store/manager.go index 108603f8f7..d2238538a4 100644 --- a/virtcontainers/store/manager.go +++ b/virtcontainers/store/manager.go @@ -7,6 +7,7 @@ package store import ( "context" + "fmt" "net/url" "sync" @@ -87,6 +88,48 @@ type Store struct { host string } +type manager struct { + sync.RWMutex + stores map[string]*Store +} + +var stores = &manager{stores: make(map[string]*Store)} + +func (m *manager) addStore(s *Store) (err error) { + if s == nil { + return fmt.Errorf("Store can not be nil") + } + + if s.url == "" { + return fmt.Errorf("Store URL can not be nil") + } + + m.Lock() + defer m.Unlock() + + if m.stores[s.url] != nil { + return fmt.Errorf("Store %s already added", s.url) + } + + m.stores[s.url] = s + + return nil +} + +func (m *manager) removeStore(url string) { + m.Lock() + defer m.Unlock() + + delete(m.stores, url) +} + +func (m *manager) findStore(url string) *Store { + m.RLock() + defer m.RUnlock() + + return m.stores[url] +} + // New will return a new virtcontainers Store. // If there is already a Store for the URL, we will re-use it. // Otherwise a new Store is created. 
diff --git a/virtcontainers/store/manager_test.go b/virtcontainers/store/manager_test.go index 2e48783776..f2a3f52da8 100644 --- a/virtcontainers/store/manager_test.go +++ b/virtcontainers/store/manager_test.go @@ -12,10 +12,75 @@ import ( "github.com/stretchr/testify/assert" ) +var storeRoot = "file:///root1/" + func TestNewStore(t *testing.T) { - s, err := New(context.Background(), "file:///root/") + s, err := New(context.Background(), storeRoot) assert.Nil(t, err) assert.Equal(t, s.scheme, "file") assert.Equal(t, s.host, "") - assert.Equal(t, s.path, "/root/") + assert.Equal(t, s.path, "/root1/") +} + +func TestManagerAddStore(t *testing.T) { + s, err := New(context.Background(), storeRoot) + assert.Nil(t, err) + err = stores.addStore(s) + defer stores.removeStore(storeRoot) + assert.Nil(t, err, "addStore failed") + + // Duplicate, should fail + err = stores.addStore(s) + assert.NotNil(t, err, "addStore should have failed") + + // Try with an empty URL + sEmpty, err := New(context.Background(), storeRoot) + assert.Nil(t, err) + sEmpty.url = "" + err = stores.addStore(sEmpty) + assert.NotNil(t, err, "addStore should have failed on an empty store URL") + +} + +func TestManagerRemoveStore(t *testing.T) { + s, err := New(context.Background(), storeRoot) + assert.Nil(t, err) + + err = stores.addStore(s) + assert.Nil(t, err, "addStore failed") + + // Positive find + newStore := stores.findStore(storeRoot) + assert.NotNil(t, newStore, "findStore failed") + + // Negative removal + stores.removeStore(storeRoot + "foobar") + + // We should still find storeRoot + newStore = stores.findStore(storeRoot) + assert.NotNil(t, newStore, "findStore failed") + + // Positive removal + stores.removeStore(storeRoot) + + // We should no longer find storeRoot + newStore = stores.findStore(storeRoot) + assert.Nil(t, newStore, "findStore should not have found %s", storeRoot) +} + +func TestManagerFindStore(t *testing.T) { + s, err := New(context.Background(), storeRoot) + assert.Nil(t, 
err) + + err = stores.addStore(s) + defer stores.removeStore(storeRoot) + assert.Nil(t, err, "addStore failed") + + // Positive find + newStore := stores.findStore(storeRoot) + assert.NotNil(t, newStore, "findStore failed") + + // Negative find + newStore = stores.findStore(storeRoot + "foobar") + assert.Nil(t, newStore, "findStore should not have found a new store") } From 6b87ecfc1b4b2c2c8c9ccb0147e503b820d10e71 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Thu, 20 Dec 2018 13:07:25 +0100 Subject: [PATCH 03/13] virtcontainers: store: Keep track of newly created Stores When a component creates a new store from a given root path, we add it to the store manager and return it back when another component asks for it. Signed-off-by: Samuel Ortiz --- virtcontainers/store/manager.go | 15 +++++++++++++-- virtcontainers/store/manager_test.go | 16 ++++++---------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/virtcontainers/store/manager.go b/virtcontainers/store/manager.go index d2238538a4..de64b1aea9 100644 --- a/virtcontainers/store/manager.go +++ b/virtcontainers/store/manager.go @@ -134,18 +134,29 @@ func (m *manager) findStore(url string) *Store { // If there is already a Store for the URL, we will re-use it. // Otherwise a new Store is created. func New(ctx context.Context, storeURL string) (*Store, error) { + // Do we already have such store? 
+ if s := stores.findStore(storeURL); s != nil { + return s, nil + } + u, err := url.Parse(storeURL) if err != nil { return nil, err } - return &Store{ + s := &Store{ ctx: ctx, url: storeURL, scheme: u.Scheme, path: u.Path, host: u.Host, - }, nil + } + + if err := stores.addStore(s); err != nil { + return nil, err + } + + return s, nil } var storeLog = logrus.WithField("source", "virtcontainers/store") diff --git a/virtcontainers/store/manager_test.go b/virtcontainers/store/manager_test.go index f2a3f52da8..61a3aa0797 100644 --- a/virtcontainers/store/manager_test.go +++ b/virtcontainers/store/manager_test.go @@ -25,9 +25,11 @@ func TestNewStore(t *testing.T) { func TestManagerAddStore(t *testing.T) { s, err := New(context.Background(), storeRoot) assert.Nil(t, err) - err = stores.addStore(s) defer stores.removeStore(storeRoot) - assert.Nil(t, err, "addStore failed") + + // Positive find + newStore := stores.findStore(storeRoot) + assert.NotNil(t, newStore, "findStore failed") // Duplicate, should fail err = stores.addStore(s) @@ -43,12 +45,9 @@ func TestManagerAddStore(t *testing.T) { } func TestManagerRemoveStore(t *testing.T) { - s, err := New(context.Background(), storeRoot) + _, err := New(context.Background(), storeRoot) assert.Nil(t, err) - err = stores.addStore(s) - assert.Nil(t, err, "addStore failed") - // Positive find newStore := stores.findStore(storeRoot) assert.NotNil(t, newStore, "findStore failed") @@ -69,12 +68,9 @@ func TestManagerRemoveStore(t *testing.T) { } func TestManagerFindStore(t *testing.T) { - s, err := New(context.Background(), storeRoot) + _, err := New(context.Background(), storeRoot) assert.Nil(t, err) - - err = stores.addStore(s) defer stores.removeStore(storeRoot) - assert.Nil(t, err, "addStore failed") // Positive find newStore := stores.findStore(storeRoot) From d22cdf2dd9f09a80804c24fb0efa80572ca37b86 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Thu, 20 Dec 2018 13:17:41 +0100 Subject: [PATCH 04/13] virtcontainers: store: 
Add an internal backend interface All Store backends will have to implement that simple interface. Signed-off-by: Samuel Ortiz --- virtcontainers/store/backend.go | 50 +++++++++++++++++++++ virtcontainers/store/filesystem.go | 70 ++++++++++++++++++++++++++++++ virtcontainers/store/manager.go | 18 +++++++- 3 files changed, 136 insertions(+), 2 deletions(-) create mode 100644 virtcontainers/store/backend.go create mode 100644 virtcontainers/store/filesystem.go diff --git a/virtcontainers/store/backend.go b/virtcontainers/store/backend.go new file mode 100644 index 0000000000..f61d2df59a --- /dev/null +++ b/virtcontainers/store/backend.go @@ -0,0 +1,50 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + "fmt" +) + +type backendType string + +const ( + filesystemBackend backendType = "filesystem" +) + +const ( + filesystemScheme string = "file" +) + +func schemeToBackendType(scheme string) (backendType, error) { + switch scheme { + case filesystemScheme: + return filesystemBackend, nil + } + + return "", fmt.Errorf("Unsupported scheme %s", scheme) +} + +func newBackend(scheme string) (backend, error) { + t, err := schemeToBackendType(scheme) + if err != nil { + return nil, err + } + + switch t { + case filesystemBackend: + return &filesystem{}, nil + } + + return nil, fmt.Errorf("Unsupported scheme %s", scheme) +} + +type backend interface { + new(ctx context.Context, path string, host string) error + load(item Item, data interface{}) error + store(item Item, data interface{}) error +} diff --git a/virtcontainers/store/filesystem.go b/virtcontainers/store/filesystem.go new file mode 100644 index 0000000000..56e9aa2fa1 --- /dev/null +++ b/virtcontainers/store/filesystem.go @@ -0,0 +1,70 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + + opentracing "github.com/opentracing/opentracing-go" + 
"github.com/sirupsen/logrus" +) + +type filesystem struct { + ctx context.Context + + path string +} + +// Logger returns a logrus logger appropriate for logging Store filesystem messages +func (f *filesystem) logger() *logrus.Entry { + return storeLog.WithFields(logrus.Fields{ + "subsystem": "store", + "backend": "filesystem", + "path": f.path, + }) +} + +func (f *filesystem) trace(name string) (opentracing.Span, context.Context) { + if f.ctx == nil { + f.logger().WithField("type", "bug").Error("trace called before context set") + f.ctx = context.Background() + } + + span, ctx := opentracing.StartSpanFromContext(f.ctx, name) + + span.SetTag("subsystem", "store") + span.SetTag("type", "filesystem") + span.SetTag("path", f.path) + + return span, ctx +} + +func (f *filesystem) new(ctx context.Context, path string, host string) error { + f.ctx = ctx + f.path = path + + f.logger().Infof("New filesystem store backend for %s", path) + + return nil +} + +func (f *filesystem) load(item Item, data interface{}) error { + span, _ := f.trace("load") + defer span.Finish() + + span.SetTag("item", item) + + return nil +} + +func (f *filesystem) store(item Item, data interface{}) error { + span, _ := f.trace("store") + defer span.Finish() + + span.SetTag("item", item) + + return nil +} diff --git a/virtcontainers/store/manager.go b/virtcontainers/store/manager.go index de64b1aea9..cfae50bb49 100644 --- a/virtcontainers/store/manager.go +++ b/virtcontainers/store/manager.go @@ -86,6 +86,8 @@ type Store struct { scheme string path string host string + + backend backend } type manager struct { @@ -152,6 +154,18 @@ func New(ctx context.Context, storeURL string) (*Store, error) { host: u.Host, } + backend, err := newBackend(s.scheme) + if err != nil { + return nil, err + } + + s.backend = backend + + // Create new backend + if err := s.backend.new(ctx, s.path, s.host); err != nil { + return nil, err + } + if err := stores.addStore(s); err != nil { return nil, err } @@ -193,7 +207,7 @@ 
func (s *Store) Load(item Item, data interface{}) error { s.RLock() defer s.RUnlock() - return nil + return s.backend.load(item, data) } // Store stores a virtcontainers item into a Store. @@ -206,5 +220,5 @@ func (s *Store) Store(item Item, data interface{}) error { s.Lock() defer s.Unlock() - return nil + return s.backend.store(item, data) } From f2ab58d84183eb2f829dad116ef40725be9533ce Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Thu, 20 Dec 2018 15:40:09 +0100 Subject: [PATCH 05/13] virtcontainers: store: Implement the filesystem backend new() only creates the backend and initialized the first layout. Signed-off-by: Samuel Ortiz --- virtcontainers/store/filesystem.go | 70 ------ virtcontainers/store/filesystem_backend.go | 227 ++++++++++++++++++ .../store/filesystem_backend_test.go | 66 +++++ virtcontainers/store/manager_test.go | 4 +- 4 files changed, 295 insertions(+), 72 deletions(-) delete mode 100644 virtcontainers/store/filesystem.go create mode 100644 virtcontainers/store/filesystem_backend.go create mode 100644 virtcontainers/store/filesystem_backend_test.go diff --git a/virtcontainers/store/filesystem.go b/virtcontainers/store/filesystem.go deleted file mode 100644 index 56e9aa2fa1..0000000000 --- a/virtcontainers/store/filesystem.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2019 Intel Corporation -// -// SPDX-License-Identifier: Apache-2.0 -// - -package store - -import ( - "context" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/sirupsen/logrus" -) - -type filesystem struct { - ctx context.Context - - path string -} - -// Logger returns a logrus logger appropriate for logging Store filesystem messages -func (f *filesystem) logger() *logrus.Entry { - return storeLog.WithFields(logrus.Fields{ - "subsystem": "store", - "backend": "filesystem", - "path": f.path, - }) -} - -func (f *filesystem) trace(name string) (opentracing.Span, context.Context) { - if f.ctx == nil { - f.logger().WithField("type", "bug").Error("trace 
called before context set") - f.ctx = context.Background() - } - - span, ctx := opentracing.StartSpanFromContext(f.ctx, name) - - span.SetTag("subsystem", "store") - span.SetTag("type", "filesystem") - span.SetTag("path", f.path) - - return span, ctx -} - -func (f *filesystem) new(ctx context.Context, path string, host string) error { - f.ctx = ctx - f.path = path - - f.logger().Infof("New filesystem store backend for %s", path) - - return nil -} - -func (f *filesystem) load(item Item, data interface{}) error { - span, _ := f.trace("load") - defer span.Finish() - - span.SetTag("item", item) - - return nil -} - -func (f *filesystem) store(item Item, data interface{}) error { - span, _ := f.trace("store") - defer span.Finish() - - span.SetTag("item", item) - - return nil -} diff --git a/virtcontainers/store/filesystem_backend.go b/virtcontainers/store/filesystem_backend.go new file mode 100644 index 0000000000..4e5fb2c8df --- /dev/null +++ b/virtcontainers/store/filesystem_backend.go @@ -0,0 +1,227 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/sirupsen/logrus" +) + +const ( + // ConfigurationFile is the file name used for every JSON sandbox configuration. + ConfigurationFile string = "config.json" + + // StateFile is the file name storing a sandbox state. + StateFile = "state.json" + + // NetworkFile is the file name storing a sandbox network. + NetworkFile = "network.json" + + // HypervisorFile is the file name storing a hypervisor's state. + HypervisorFile = "hypervisor.json" + + // AgentFile is the file name storing an agent's state. + AgentFile = "agent.json" + + // ProcessFile is the file name storing a container process. + ProcessFile = "process.json" + + // LockFile is the file name locking the usage of a resource. 
+ LockFile = "lock" + + // MountsFile is the file name storing a container's mount points. + MountsFile = "mounts.json" + + // DevicesFile is the file name storing a container's devices. + DevicesFile = "devices.json" +) + +// DirMode is the permission bits used for creating a directory +const DirMode = os.FileMode(0750) | os.ModeDir + +// StoragePathSuffix is the suffix used for all storage paths +// +// Note: this very brief path represents "virtcontainers". It is as +// terse as possible to minimise path length. +const StoragePathSuffix = "vc" + +// SandboxPathSuffix is the suffix used for sandbox storage +const SandboxPathSuffix = "sbs" + +// VMPathSuffix is the suffix used for guest VMs. +const VMPathSuffix = "vm" + +// ConfigStoragePath is the sandbox configuration directory. +// It will contain one config.json file for each created sandbox. +var ConfigStoragePath = filepath.Join("/var/lib", StoragePathSuffix, SandboxPathSuffix) + +// RunStoragePath is the sandbox runtime directory. +// It will contain one state.json and one lock file for each created sandbox. +var RunStoragePath = filepath.Join("/run", StoragePathSuffix, SandboxPathSuffix) + +// RunVMStoragePath is the vm directory. +// It will contain all guest vm sockets and shared mountpoints. 
+var RunVMStoragePath = filepath.Join("/run", StoragePathSuffix, VMPathSuffix) + +func itemToFile(item Item) (string, error) { + switch item { + case Configuration: + return ConfigurationFile, nil + case State: + return StateFile, nil + case Network: + return NetworkFile, nil + case Hypervisor: + return HypervisorFile, nil + case Agent: + return AgentFile, nil + case Process: + return ProcessFile, nil + case Lock: + return LockFile, nil + case Mounts: + return MountsFile, nil + case Devices, DeviceIDs: + return DevicesFile, nil + } + + return "", fmt.Errorf("Unknown item %s", item) +} + +type filesystem struct { + ctx context.Context + + path string +} + +// Logger returns a logrus logger appropriate for logging Store filesystem messages +func (f *filesystem) logger() *logrus.Entry { + return storeLog.WithFields(logrus.Fields{ + "subsystem": "store", + "backend": "filesystem", + "path": f.path, + }) +} + +func (f *filesystem) trace(name string) (opentracing.Span, context.Context) { + if f.ctx == nil { + f.logger().WithField("type", "bug").Error("trace called before context set") + f.ctx = context.Background() + } + + span, ctx := opentracing.StartSpanFromContext(f.ctx, name) + + span.SetTag("subsystem", "store") + span.SetTag("type", "filesystem") + span.SetTag("path", f.path) + + return span, ctx +} + +func (f *filesystem) itemToPath(item Item) (string, error) { + fileName, err := itemToFile(item) + if err != nil { + return "", err + } + + return filepath.Join(f.path, fileName), nil +} + +func (f *filesystem) initialize() error { + _, err := os.Stat(f.path) + if err == nil { + return nil + } + + // Our root path does not exist, we need to create the initial layout: + // The root directory and a lock file + + // Root directory + if err := os.MkdirAll(f.path, DirMode); err != nil { + return err + } + + // Lock + lockPath := filepath.Join(f.path, LockFile) + + lockFile, err := os.Create(lockPath) + if err != nil { + f.delete() + return err + } + lockFile.Close() + + 
return nil +} + +func (f *filesystem) new(ctx context.Context, path string, host string) error { + f.ctx = ctx + f.path = path + + f.logger().Debugf("New filesystem store backend for %s", path) + + return f.initialize() +} + +func (f *filesystem) delete() { + os.RemoveAll(f.path) +} + +func (f *filesystem) load(item Item, data interface{}) error { + span, _ := f.trace("load") + defer span.Finish() + + span.SetTag("item", item) + + filePath, err := f.itemToPath(item) + if err != nil { + return err + } + + fileData, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + if err := json.Unmarshal(fileData, data); err != nil { + return err + } + + return nil +} + +func (f *filesystem) store(item Item, data interface{}) error { + span, _ := f.trace("store") + defer span.Finish() + + span.SetTag("item", item) + + filePath, err := f.itemToPath(item) + if err != nil { + return err + } + + file, err := os.Create(filePath) + if err != nil { + return err + } + defer file.Close() + + jsonOut, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("Could not marshall data: %s", err) + } + file.Write(jsonOut) + + return nil +} diff --git a/virtcontainers/store/filesystem_backend_test.go b/virtcontainers/store/filesystem_backend_test.go new file mode 100644 index 0000000000..460a61bd83 --- /dev/null +++ b/virtcontainers/store/filesystem_backend_test.go @@ -0,0 +1,66 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestNoopStructure struct { + Field1 string + Field2 string +} + +var rootPath = "/tmp/root1/" +var expectedFilesystemData = "{\"Field1\":\"value1\",\"Field2\":\"value2\"}" + +func TestStoreFilesystemStore(t *testing.T) { + f := filesystem{} + + err := f.new(context.Background(), rootPath, "") + defer f.delete() + assert.Nil(t, err) + + data := TestNoopStructure{ + 
Field1: "value1", + Field2: "value2", + } + + err = f.store(State, data) + assert.Nil(t, err) + + filesystemData, err := ioutil.ReadFile(filepath.Join(rootPath, StateFile)) + assert.Nil(t, err) + assert.Equal(t, string(filesystemData), expectedFilesystemData) +} + +func TestStoreFilesystemLoad(t *testing.T) { + f := filesystem{} + + err := f.new(context.Background(), rootPath, "") + defer f.delete() + assert.Nil(t, err) + + data := TestNoopStructure{ + Field1: "value1", + Field2: "value2", + } + + // Store test data + err = f.store(State, data) + assert.Nil(t, err) + + // Load and compare + newData := TestNoopStructure{} + err = f.load(State, &newData) + assert.Nil(t, err) + assert.Equal(t, newData, data) +} diff --git a/virtcontainers/store/manager_test.go b/virtcontainers/store/manager_test.go index 61a3aa0797..066e586256 100644 --- a/virtcontainers/store/manager_test.go +++ b/virtcontainers/store/manager_test.go @@ -12,14 +12,14 @@ import ( "github.com/stretchr/testify/assert" ) -var storeRoot = "file:///root1/" +var storeRoot = "file:///tmp/root1/" func TestNewStore(t *testing.T) { s, err := New(context.Background(), storeRoot) assert.Nil(t, err) assert.Equal(t, s.scheme, "file") assert.Equal(t, s.host, "") - assert.Equal(t, s.path, "/root1/") + assert.Equal(t, s.path, "/tmp/root1/") } func TestManagerAddStore(t *testing.T) { From ef11bf52a6a24eb66ec01bdabfcacff72d71c6be Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Thu, 20 Dec 2018 19:05:32 +0100 Subject: [PATCH 06/13] virtcontainers: store: Add a Delete API It's going to be used to completely clean a Store away. 
Signed-off-by: Samuel Ortiz --- virtcontainers/store/backend.go | 1 + virtcontainers/store/filesystem_backend.go | 4 +-- .../store/filesystem_backend_test.go | 23 ++++++++++++++++ virtcontainers/store/manager.go | 26 +++++++++++++++++++ virtcontainers/store/manager_test.go | 12 +++++++++ 5 files changed, 64 insertions(+), 2 deletions(-) diff --git a/virtcontainers/store/backend.go b/virtcontainers/store/backend.go index f61d2df59a..c5a6a7e39d 100644 --- a/virtcontainers/store/backend.go +++ b/virtcontainers/store/backend.go @@ -45,6 +45,7 @@ func newBackend(scheme string) (backend, error) { type backend interface { new(ctx context.Context, path string, host string) error + delete() error load(item Item, data interface{}) error store(item Item, data interface{}) error } diff --git a/virtcontainers/store/filesystem_backend.go b/virtcontainers/store/filesystem_backend.go index 4e5fb2c8df..efe3f26f8c 100644 --- a/virtcontainers/store/filesystem_backend.go +++ b/virtcontainers/store/filesystem_backend.go @@ -173,8 +173,8 @@ func (f *filesystem) new(ctx context.Context, path string, host string) error { return f.initialize() } -func (f *filesystem) delete() { - os.RemoveAll(f.path) +func (f *filesystem) delete() error { + return os.RemoveAll(f.path) } func (f *filesystem) load(item Item, data interface{}) error { diff --git a/virtcontainers/store/filesystem_backend_test.go b/virtcontainers/store/filesystem_backend_test.go index 460a61bd83..b8a3bbd92a 100644 --- a/virtcontainers/store/filesystem_backend_test.go +++ b/virtcontainers/store/filesystem_backend_test.go @@ -8,6 +8,7 @@ package store import ( "context" "io/ioutil" + "os" "path/filepath" "testing" @@ -64,3 +65,25 @@ func TestStoreFilesystemLoad(t *testing.T) { assert.Nil(t, err) assert.Equal(t, newData, data) } + +func TestStoreFilesystemDelete(t *testing.T) { + f := filesystem{} + + err := f.new(context.Background(), rootPath, "") + assert.Nil(t, err) + + data := TestNoopStructure{ + Field1: "value1", + Field2: 
"value2", + } + + // Store test data + err = f.store(State, data) + assert.Nil(t, err) + + err = f.delete() + assert.Nil(t, err) + + _, err = os.Stat(f.path) + assert.NotNil(t, err) +} diff --git a/virtcontainers/store/manager.go b/virtcontainers/store/manager.go index cfae50bb49..ef4a09f78b 100644 --- a/virtcontainers/store/manager.go +++ b/virtcontainers/store/manager.go @@ -173,6 +173,13 @@ func New(ctx context.Context, storeURL string) (*Store, error) { return s, nil } +// DeleteAll deletes all Stores from the manager. +func DeleteAll() { + for _, s := range stores.stores { + s.Delete() + } +} + var storeLog = logrus.WithField("source", "virtcontainers/store") // Logger returns a logrus logger appropriate for logging Store messages @@ -222,3 +229,22 @@ func (s *Store) Store(item Item, data interface{}) error { return s.backend.store(item, data) } + +// Delete deletes all artifacts created by a Store. +// The Store is also removed from the manager. +func (s *Store) Delete() error { + span, _ := s.trace("Store") + defer span.Finish() + + s.Lock() + defer s.Unlock() + + if err := s.backend.delete(); err != nil { + return err + } + + stores.removeStore(s.url) + s.url = "" + + return nil +} diff --git a/virtcontainers/store/manager_test.go b/virtcontainers/store/manager_test.go index 066e586256..f8668a0e2f 100644 --- a/virtcontainers/store/manager_test.go +++ b/virtcontainers/store/manager_test.go @@ -22,6 +22,18 @@ func TestNewStore(t *testing.T) { assert.Equal(t, s.path, "/tmp/root1/") } +func TestDeleteStore(t *testing.T) { + s, err := New(context.Background(), storeRoot) + assert.Nil(t, err) + + err = s.Delete() + assert.Nil(t, err) + + // We should no longer find storeRoot + newStore := stores.findStore(storeRoot) + assert.Nil(t, newStore, "findStore should not have found %s", storeRoot) +} + func TestManagerAddStore(t *testing.T) { s, err := New(context.Background(), storeRoot) assert.Nil(t, err) From c25c60898bf0664faad3115636cc260bc6756b97 Mon Sep 17 
00:00:00 2001 From: Samuel Ortiz Date: Fri, 21 Dec 2018 10:45:08 +0100 Subject: [PATCH 07/13] virtcontainers: store: Add a VC specific Store This is basically a Store dispatcher, for storing items into their right Store (either configuration or state). There's very little logic here, except for finding out which store an item belongs to in the virtcontainers context. vc.go also provides virtcontainers specific utilities. Signed-off-by: Samuel Ortiz --- virtcontainers/store/manager_test.go | 36 ++++ virtcontainers/store/vc.go | 282 +++++++++++++++++++++++++++ virtcontainers/store/vc_test.go | 118 +++++++++++ 3 files changed, 436 insertions(+) create mode 100644 virtcontainers/store/vc.go create mode 100644 virtcontainers/store/vc_test.go diff --git a/virtcontainers/store/manager_test.go b/virtcontainers/store/manager_test.go index f8668a0e2f..0a15ffc6fb 100644 --- a/virtcontainers/store/manager_test.go +++ b/virtcontainers/store/manager_test.go @@ -7,11 +7,22 @@ package store import ( "context" + "io/ioutil" + "os" + "path/filepath" "testing" "github.com/stretchr/testify/assert" ) +const testSandboxID = "7f49d00d-1995-4156-8c79-5f5ab24ce138" + +var sandboxDirConfig = "" +var sandboxFileConfig = "" +var sandboxDirState = "" +var sandboxDirLock = "" +var sandboxFileState = "" +var sandboxFileLock = "" var storeRoot = "file:///tmp/root1/" func TestNewStore(t *testing.T) { @@ -92,3 +103,28 @@ func TestManagerFindStore(t *testing.T) { newStore = stores.findStore(storeRoot + "foobar") assert.Nil(t, newStore, "findStore should not have found a new store") } + +// TestMain is the common main function used by ALL the test functions +// for the store. +func TestMain(m *testing.M) { + testDir, err := ioutil.TempDir("", "store-tmp-") + if err != nil { + panic(err) + } + + // allow the tests to run without affecting the host system. 
+ ConfigStoragePath = filepath.Join(testDir, StoragePathSuffix, "config") + RunStoragePath = filepath.Join(testDir, StoragePathSuffix, "run") + + // set now that ConfigStoragePath has been overridden. + sandboxDirConfig = filepath.Join(ConfigStoragePath, testSandboxID) + sandboxFileConfig = filepath.Join(ConfigStoragePath, testSandboxID, ConfigurationFile) + sandboxDirState = filepath.Join(RunStoragePath, testSandboxID) + sandboxDirLock = filepath.Join(RunStoragePath, testSandboxID) + sandboxFileState = filepath.Join(RunStoragePath, testSandboxID, StateFile) + sandboxFileLock = filepath.Join(RunStoragePath, testSandboxID, LockFile) + + ret := m.Run() + + os.Exit(ret) +} diff --git a/virtcontainers/store/vc.go b/virtcontainers/store/vc.go new file mode 100644 index 0000000000..fb43e6fd23 --- /dev/null +++ b/virtcontainers/store/vc.go @@ -0,0 +1,282 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + + "github.com/kata-containers/runtime/virtcontainers/device/api" + "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/device/drivers" + "github.com/kata-containers/runtime/virtcontainers/types" +) + +// VCStore is a virtcontainers specific Store. +// Virtcontainers typically needs a configuration Store for +// storing permanent items across reboots. +// It also needs a state Store for storing states and other run-time +// related items. Those should not survive a reboot. +// +// VCStore simply dispatches items into the right Store. +type VCStore struct { + config, state *Store +} + +func (s *VCStore) itemToStore(item Item) *Store { + switch item { + case Configuration: + return s.config + case State, Network, Hypervisor, Agent, Process, Lock, Mounts, Devices, DeviceIDs: + return s.state + } + + return s.state +} + +// NewVCStore creates a virtcontainers specific Store. 
+func NewVCStore(ctx context.Context, configRoot, stateRoot string) (*VCStore, error) { + config, err := New(ctx, configRoot) + if err != nil { + return nil, err + } + + state, err := New(ctx, stateRoot) + if err != nil { + return nil, err + } + + return &VCStore{ + config: config, + state: state, + }, nil +} + +// NewVCSandboxStore creates a virtcontainers sandbox Store, with filesystem backend. +func NewVCSandboxStore(ctx context.Context, sandboxID string) (*VCStore, error) { + if sandboxID == "" { + return nil, fmt.Errorf("sandbox ID can not be empty") + } + + return NewVCStore(ctx, + SandboxConfigurationRoot(sandboxID), + SandboxRuntimeRoot(sandboxID), + ) +} + +// NewVCContainerStore creates a virtcontainers container Store, with filesystem backend. +func NewVCContainerStore(ctx context.Context, sandboxID, containerID string) (*VCStore, error) { + if sandboxID == "" { + return nil, fmt.Errorf("sandbox ID can not be empty") + } + + if containerID == "" { + return nil, fmt.Errorf("container ID can not be empty") + } + + return NewVCStore(ctx, + ContainerConfigurationRoot(sandboxID, containerID), + ContainerRuntimeRoot(sandboxID, containerID), + ) +} + +// Store stores a virtcontainers item into the right Store. +func (s *VCStore) Store(item Item, data interface{}) error { + return s.itemToStore(item).Store(item, data) +} + +// Load loads a virtcontainers item from the right Store. +func (s *VCStore) Load(item Item, data interface{}) error { + return s.itemToStore(item).Load(item, data) +} + +// Delete deletes all artifacts created by a VCStore. +// Both config and state Stores are also removed from the manager. 
+func (s *VCStore) Delete() error { + if err := s.config.Delete(); err != nil { + return err + } + + if err := s.state.Delete(); err != nil { + return err + } + + return nil +} + +// LoadState loads and returns a virtcontainers state +func (s *VCStore) LoadState() (types.State, error) { + var state types.State + + if err := s.state.Load(State, &state); err != nil { + return types.State{}, err + } + + return state, nil +} + +// TypedDevice is used as an intermediate representation for marshalling +// and unmarshalling Device implementations. +type TypedDevice struct { + Type string + + // Data is assigned the Device object. + // This being declared as RawMessage prevents it from being marshalled/unmarshalled. + // We do that explicitly depending on Type. + Data json.RawMessage +} + +// StoreDevices stores a virtcontainers devices slice. +// The Device slice is first marshalled into a TypedDevice +// one to include the type of the Device objects. +func (s *VCStore) StoreDevices(devices []api.Device) error { + var typedDevices []TypedDevice + + for _, d := range devices { + tempJSON, _ := json.Marshal(d) + typedDevice := TypedDevice{ + Type: string(d.DeviceType()), + Data: tempJSON, + } + typedDevices = append(typedDevices, typedDevice) + } + + return s.state.Store(Devices, typedDevices) +} + +// LoadDevices loads and returns a virtcontainers devices slice. +// We need a custom unmarshalling routine for translating TypedDevices +// into api.Devices based on their type. 
+func (s *VCStore) LoadDevices() ([]api.Device, error) { + var typedDevices []TypedDevice + var devices []api.Device + + if err := s.state.Load(Devices, &typedDevices); err != nil { + return []api.Device{}, err + } + + for _, d := range typedDevices { + switch d.Type { + case string(config.DeviceVFIO): + // TODO: remove dependency of drivers package + var device drivers.VFIODevice + if err := json.Unmarshal(d.Data, &device); err != nil { + return []api.Device{}, err + } + devices = append(devices, &device) + case string(config.DeviceBlock): + // TODO: remove dependency of drivers package + var device drivers.BlockDevice + if err := json.Unmarshal(d.Data, &device); err != nil { + return []api.Device{}, err + } + devices = append(devices, &device) + case string(config.DeviceGeneric): + // TODO: remove dependency of drivers package + var device drivers.GenericDevice + if err := json.Unmarshal(d.Data, &device); err != nil { + return []api.Device{}, err + } + devices = append(devices, &device) + default: + return []api.Device{}, fmt.Errorf("Unknown device type, could not unmarshal") + } + } + + return devices, nil +} + +// Utilities for virtcontainers + +// SandboxConfigurationRoot returns a virtcontainers sandbox configuration root URL. +// This will hold across host reboot persistent data about a sandbox configuration. +// It should look like file:///var/lib/vc/sbs// +func SandboxConfigurationRoot(id string) string { + return filesystemScheme + "://" + filepath.Join(ConfigStoragePath, id) +} + +// SandboxConfigurationRootPath returns a virtcontainers sandbox configuration root path. +func SandboxConfigurationRootPath(id string) string { + return filepath.Join(ConfigStoragePath, id) +} + +// SandboxConfigurationItemPath returns a virtcontainers sandbox configuration item path. 
+func SandboxConfigurationItemPath(id string, item Item) (string, error) { + if id == "" { + return "", fmt.Errorf("Empty sandbox ID") + } + + itemFile, err := itemToFile(item) + if err != nil { + return "", err + } + + return filepath.Join(ConfigStoragePath, id, itemFile), nil +} + +// SandboxRuntimeRoot returns a virtcontainers sandbox runtime root URL. +// This will hold data related to a sandbox run-time state that will not +// be persistent across host reboots. +// It should look like file:///run/vc/sbs// +func SandboxRuntimeRoot(id string) string { + return filesystemScheme + "://" + filepath.Join(RunStoragePath, id) +} + +// SandboxRuntimeRootPath returns a virtcontainers sandbox runtime root path. +func SandboxRuntimeRootPath(id string) string { + return filepath.Join(RunStoragePath, id) +} + +// SandboxRuntimeItemPath returns a virtcontainers sandbox runtime item path. +func SandboxRuntimeItemPath(id string, item Item) (string, error) { + if id == "" { + return "", fmt.Errorf("Empty sandbox ID") + } + + itemFile, err := itemToFile(item) + if err != nil { + return "", err + } + + return filepath.Join(RunStoragePath, id, itemFile), nil +} + +// ContainerConfigurationRoot returns a virtcontainers container configuration root URL. +// This will hold across host reboot persistent data about a container configuration. +// It should look like file:///var/lib/vc/sbs// +func ContainerConfigurationRoot(sandboxID, containerID string) string { + return filesystemScheme + "://" + filepath.Join(ConfigStoragePath, sandboxID, containerID) +} + +// ContainerConfigurationRootPath returns a virtcontainers container configuration root path. +func ContainerConfigurationRootPath(sandboxID, containerID string) string { + return filepath.Join(ConfigStoragePath, sandboxID, containerID) +} + +// ContainerRuntimeRoot returns a virtcontainers container runtime root URL. +// This will hold data related to a container run-time state that will not +// be persistent across host reboots. 
+// It should look like file:///run/vc/sbs/// +func ContainerRuntimeRoot(sandboxID, containerID string) string { + return filesystemScheme + "://" + filepath.Join(RunStoragePath, sandboxID, containerID) +} + +// ContainerRuntimeRootPath returns a virtcontainers container runtime root path. +func ContainerRuntimeRootPath(sandboxID, containerID string) string { + return filepath.Join(RunStoragePath, sandboxID, containerID) +} + +// VCSandboxStoreExists returns true if a sandbox store already exists. +func VCSandboxStoreExists(ctx context.Context, sandboxID string) bool { + s := stores.findStore(SandboxConfigurationRoot(sandboxID)) + if s != nil { + return true + } + + return false +} diff --git a/virtcontainers/store/vc_test.go b/virtcontainers/store/vc_test.go new file mode 100644 index 0000000000..63449249bd --- /dev/null +++ b/virtcontainers/store/vc_test.go @@ -0,0 +1,118 @@ +// Copyright (c) 2019 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +package store + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStoreVCRoots(t *testing.T) { + rootURL := filesystemScheme + "://" + ConfigStoragePath + sandboxID := "sandbox" + containerID := "container" + sConfigRoot := rootURL + "/" + sandboxID + cConfigRoot := rootURL + "/" + sandboxID + "/" + containerID + + assert.Equal(t, SandboxConfigurationRoot(sandboxID), sConfigRoot) + assert.Equal(t, ContainerConfigurationRoot(sandboxID, containerID), cConfigRoot) +} + +func testStoreVCSandboxDir(t *testing.T, item Item, expected string) error { + var dir string + if item == Configuration { + dir = SandboxConfigurationRootPath(testSandboxID) + } else { + dir = SandboxRuntimeRootPath(testSandboxID) + } + + if dir != expected { + return fmt.Errorf("Unexpected sandbox directory %s vs %s", dir, expected) + } + + return nil +} + +func testStoreVCSandboxFile(t *testing.T, item Item, expected string) error { + var file string + var err error + + if item == 
Configuration { + file, err = SandboxConfigurationItemPath(testSandboxID, item) + } else { + file, err = SandboxRuntimeItemPath(testSandboxID, item) + } + + if err != nil { + return err + } + + if file != expected { + return fmt.Errorf("Unexpected sandbox file %s vs %s", file, expected) + } + + return nil +} + +func TestStoreVCSandboxDirConfig(t *testing.T) { + err := testStoreVCSandboxDir(t, Configuration, sandboxDirConfig) + assert.Nil(t, err) +} + +func TestStoreVCSandboxDirState(t *testing.T) { + err := testStoreVCSandboxDir(t, State, sandboxDirState) + assert.Nil(t, err) +} + +func TestStoreVCSandboxDirLock(t *testing.T) { + err := testStoreVCSandboxDir(t, Lock, sandboxDirLock) + assert.Nil(t, err) +} + +func TestStoreVCSandboxFileConfig(t *testing.T) { + err := testStoreVCSandboxFile(t, Configuration, sandboxFileConfig) + assert.Nil(t, err) +} + +func TestStoreVCSandboxFileState(t *testing.T) { + err := testStoreVCSandboxFile(t, State, sandboxFileState) + assert.Nil(t, err) +} + +func TestStoreVCSandboxFileLock(t *testing.T) { + err := testStoreVCSandboxFile(t, Lock, sandboxFileLock) + assert.Nil(t, err) +} + +func TestStoreVCSandboxFileNegative(t *testing.T) { + _, err := SandboxConfigurationItemPath("", State) + assert.NotNil(t, err) + + _, err = SandboxRuntimeItemPath("", State) + assert.NotNil(t, err) +} + +func TestStoreVCNewVCSandboxStore(t *testing.T) { + _, err := NewVCSandboxStore(context.Background(), testSandboxID) + assert.Nil(t, err) + + _, err = NewVCSandboxStore(context.Background(), "") + assert.NotNil(t, err) +} + +func TestStoreVCNewVCContainerStore(t *testing.T) { + _, err := NewVCContainerStore(context.Background(), testSandboxID, "foobar") + assert.Nil(t, err) + + _, err = NewVCContainerStore(context.Background(), "", "foobar") + assert.NotNil(t, err) + + _, err = NewVCContainerStore(context.Background(), "", "foobar") + assert.NotNil(t, err) +} From 6e9256f483ef30b331edd3549b495e01c7e37500 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz 
Date: Tue, 8 Jan 2019 15:55:52 +0100 Subject: [PATCH 08/13] virtcontainers: store: Add a Raw API The Raw API creates a raw item, i.e. an item that must be handled directly by the caller. A raw item is one that's not defined by the store.Item enum, i.e. it is a custom, caller defined one. The caller gets a URL back and is responsible for handling the item directly from this URL. Signed-off-by: Samuel Ortiz --- virtcontainers/store/backend.go | 5 +++ virtcontainers/store/filesystem_backend.go | 38 ++++++++++++++++++- .../store/filesystem_backend_test.go | 12 ++++++ virtcontainers/store/manager.go | 13 +++++++ virtcontainers/store/vc.go | 11 ++++++ 5 files changed, 77 insertions(+), 2 deletions(-) diff --git a/virtcontainers/store/backend.go b/virtcontainers/store/backend.go index c5a6a7e39d..e183e96054 100644 --- a/virtcontainers/store/backend.go +++ b/virtcontainers/store/backend.go @@ -48,4 +48,9 @@ type backend interface { delete() error load(item Item, data interface{}) error store(item Item, data interface{}) error + // raw creates a raw Store item. A raw item is one that is + // not defined through the Item enum. + // The caller gets an item URL back and handles it directly, + // outside of the top level Store API. + raw(id string) (string, error) } diff --git a/virtcontainers/store/filesystem_backend.go b/virtcontainers/store/filesystem_backend.go index efe3f26f8c..5c6206d318 100644 --- a/virtcontainers/store/filesystem_backend.go +++ b/virtcontainers/store/filesystem_backend.go @@ -101,7 +101,8 @@ func itemToFile(item Item) (string, error) { type filesystem struct { ctx context.Context - path string + path string + rawPath string } // Logger returns a logrus logger appropriate for logging Store filesystem messages @@ -144,13 +145,18 @@ func (f *filesystem) initialize() error { } // Our root path does not exist, we need to create the initial layout: - // The root directory and a lock file + // The root directory, a lock file and a raw files directory. 
// Root directory if err := os.MkdirAll(f.path, DirMode); err != nil { return err } + // Raw directory + if err := os.MkdirAll(f.rawPath, DirMode); err != nil { + return err + } + // Lock lockPath := filepath.Join(f.path, LockFile) @@ -167,6 +173,7 @@ func (f *filesystem) initialize() error { func (f *filesystem) new(ctx context.Context, path string, host string) error { f.ctx = ctx f.path = path + f.rawPath = filepath.Join(f.path, "raw") f.logger().Debugf("New filesystem store backend for %s", path) @@ -225,3 +232,30 @@ func (f *filesystem) store(item Item, data interface{}) error { return nil } + +func (f *filesystem) raw(id string) (string, error) { + span, _ := f.trace("raw") + defer span.Finish() + + span.SetTag("id", id) + + // We must use the item ID. + if id != "" { + filePath := filepath.Join(f.rawPath, id) + file, err := os.Create(filePath) + if err != nil { + return "", err + } + + return filesystemScheme + "://" + file.Name(), nil + } + + // Generate a random temporary file. 
+ file, err := ioutil.TempFile(f.rawPath, "raw-") + if err != nil { + return "", err + } + defer file.Close() + + return filesystemScheme + "://" + file.Name(), nil +} diff --git a/virtcontainers/store/filesystem_backend_test.go b/virtcontainers/store/filesystem_backend_test.go index b8a3bbd92a..4d62a043c0 100644 --- a/virtcontainers/store/filesystem_backend_test.go +++ b/virtcontainers/store/filesystem_backend_test.go @@ -87,3 +87,15 @@ func TestStoreFilesystemDelete(t *testing.T) { _, err = os.Stat(f.path) assert.NotNil(t, err) } + +func TestStoreFilesystemRaw(t *testing.T) { + f := filesystem{} + + err := f.new(context.Background(), rootPath, "") + defer f.delete() + assert.Nil(t, err) + + path, err := f.raw("roah") + assert.Nil(t, err) + assert.Equal(t, path, filesystemScheme+"://"+filepath.Join(rootPath, "raw", "roah")) +} diff --git a/virtcontainers/store/manager.go b/virtcontainers/store/manager.go index ef4a09f78b..d5e96b2e48 100644 --- a/virtcontainers/store/manager.go +++ b/virtcontainers/store/manager.go @@ -248,3 +248,16 @@ func (s *Store) Delete() error { return nil } + +// Raw creates a raw item to be handled directly by the API caller. +// It returns a full URL to the item and the caller is responsible +// for handling the item through this URL. +func (s *Store) Raw(id string) (string, error) { + span, _ := s.trace("Raw") + defer span.Finish() + + s.Lock() + defer s.Unlock() + + return s.backend.raw(id) +} diff --git a/virtcontainers/store/vc.go b/virtcontainers/store/vc.go index fb43e6fd23..d3b0835624 100644 --- a/virtcontainers/store/vc.go +++ b/virtcontainers/store/vc.go @@ -191,6 +191,17 @@ func (s *VCStore) LoadDevices() ([]api.Device, error) { return devices, nil } +// Raw creates a raw item in the virtcontainer state Store. A raw +// item is a custom one, not defined through the Item enum, and that +// the caller needs to handle directly. +// Typically this is used to create a custom virtcontainers file. 
+// For example the Firecracker code uses this API to create temp +// files under the sandbox state root path, and uses them as block +// driver backend placeholder. +func (s *VCStore) Raw(id string) (string, error) { + return s.state.Raw(id) +} + // Utilities for virtcontainers // SandboxConfigurationRoot returns a virtcontainers sandbox configuration root URL. From 2ecffda17032ad2fe180fec091358a34aa025830 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 9 Jan 2019 14:10:32 +0100 Subject: [PATCH 09/13] virtcontainers: store: Add a ItemLock API The ItemLock API allows for taking shared and exclusive locks on all items. For virtcontainers, this is specialized into taking locks on the Lock item, and will be used for sandbox locking. Signed-off-by: Samuel Ortiz --- virtcontainers/store/backend.go | 2 + virtcontainers/store/filesystem_backend.go | 50 +++++++++++++++++++ .../store/filesystem_backend_test.go | 48 ++++++++++++++++++ virtcontainers/store/manager.go | 10 ++++ virtcontainers/store/vc.go | 15 ++++++ 5 files changed, 125 insertions(+) diff --git a/virtcontainers/store/backend.go b/virtcontainers/store/backend.go index e183e96054..99a56317c1 100644 --- a/virtcontainers/store/backend.go +++ b/virtcontainers/store/backend.go @@ -53,4 +53,6 @@ type backend interface { // The caller gets an item URL back and handles it directly, // outside of the top level Store API. 
raw(id string) (string, error) + lock(item Item, exclusive bool) (string, error) + unlock(item Item, token string) error } diff --git a/virtcontainers/store/filesystem_backend.go b/virtcontainers/store/filesystem_backend.go index 5c6206d318..e57f8b7ed3 100644 --- a/virtcontainers/store/filesystem_backend.go +++ b/virtcontainers/store/filesystem_backend.go @@ -12,7 +12,9 @@ import ( "io/ioutil" "os" "path/filepath" + "syscall" + "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" opentracing "github.com/opentracing/opentracing-go" "github.com/sirupsen/logrus" ) @@ -103,6 +105,8 @@ type filesystem struct { path string rawPath string + + lockTokens map[string]*os.File } // Logger returns a logrus logger appropriate for logging Store filesystem messages @@ -174,6 +178,7 @@ func (f *filesystem) new(ctx context.Context, path string, host string) error { f.ctx = ctx f.path = path f.rawPath = filepath.Join(f.path, "raw") + f.lockTokens = make(map[string]*os.File) f.logger().Debugf("New filesystem store backend for %s", path) @@ -259,3 +264,48 @@ func (f *filesystem) raw(id string) (string, error) { return filesystemScheme + "://" + file.Name(), nil } + +func (f *filesystem) lock(item Item, exclusive bool) (string, error) { + itemPath, err := f.itemToPath(item) + if err != nil { + return "", err + } + + itemFile, err := os.Open(itemPath) + if err != nil { + return "", err + } + + var lockType int + if exclusive { + lockType = syscall.LOCK_EX + } else { + lockType = syscall.LOCK_SH + } + + if err := syscall.Flock(int(itemFile.Fd()), lockType); err != nil { + itemFile.Close() + return "", err + } + + token := uuid.Generate().String() + f.lockTokens[token] = itemFile + + return token, nil +} + +func (f *filesystem) unlock(item Item, token string) error { + itemFile := f.lockTokens[token] + if itemFile == nil { + return fmt.Errorf("No lock for token %s", token) + } + + if err := syscall.Flock(int(itemFile.Fd()), syscall.LOCK_UN); err != nil { + return err + } + + 
itemFile.Close() + delete(f.lockTokens, token) + + return nil +} diff --git a/virtcontainers/store/filesystem_backend_test.go b/virtcontainers/store/filesystem_backend_test.go index 4d62a043c0..28589ae57f 100644 --- a/virtcontainers/store/filesystem_backend_test.go +++ b/virtcontainers/store/filesystem_backend_test.go @@ -99,3 +99,51 @@ func TestStoreFilesystemRaw(t *testing.T) { assert.Nil(t, err) assert.Equal(t, path, filesystemScheme+"://"+filepath.Join(rootPath, "raw", "roah")) } + +func TestStoreFilesystemLockShared(t *testing.T) { + f := filesystem{} + + err := f.new(context.Background(), rootPath, "") + defer f.delete() + assert.Nil(t, err) + + // Take 2 shared locks + token1, err := f.lock(Lock, false) + assert.Nil(t, err) + + token2, err := f.lock(Lock, false) + assert.Nil(t, err) + + err = f.unlock(Lock, token1) + assert.Nil(t, err) + + err = f.unlock(Lock, token2) + assert.Nil(t, err) + + err = f.unlock(Lock, token2) + assert.NotNil(t, err) +} + +func TestStoreFilesystemLockExclusive(t *testing.T) { + f := filesystem{} + + err := f.new(context.Background(), rootPath, "") + defer f.delete() + assert.Nil(t, err) + + // Take 1 exclusive lock + token, err := f.lock(Lock, true) + assert.Nil(t, err) + + err = f.unlock(Lock, token) + assert.Nil(t, err) + + token, err = f.lock(Lock, true) + assert.Nil(t, err) + + err = f.unlock(Lock, token) + assert.Nil(t, err) + + err = f.unlock(Lock, token) + assert.NotNil(t, err) +} diff --git a/virtcontainers/store/manager.go b/virtcontainers/store/manager.go index d5e96b2e48..fa18ab586d 100644 --- a/virtcontainers/store/manager.go +++ b/virtcontainers/store/manager.go @@ -261,3 +261,13 @@ func (s *Store) Raw(id string) (string, error) { return s.backend.raw(id) } + +// ItemLock takes a lock on an item. +func (s *Store) ItemLock(item Item, exclusive bool) (string, error) { + return s.backend.lock(item, exclusive) +} + +// ItemUnlock unlocks an item. 
+func (s *Store) ItemUnlock(item Item, token string) error { + return s.backend.unlock(item, token) +} diff --git a/virtcontainers/store/vc.go b/virtcontainers/store/vc.go index d3b0835624..b51cf725aa 100644 --- a/virtcontainers/store/vc.go +++ b/virtcontainers/store/vc.go @@ -202,6 +202,21 @@ func (s *VCStore) Raw(id string) (string, error) { return s.state.Raw(id) } +// Lock takes an exclusive lock on the virtcontainers state Lock item. +func (s *VCStore) Lock() (string, error) { + return s.state.ItemLock(Lock, true) +} + +// RLock takes a shared lock on the virtcontainers state Lock item. +func (s *VCStore) RLock() (string, error) { + return s.state.ItemLock(Lock, false) +} + +// Unlock unlocks the virtcontainers state Lock item. +func (s *VCStore) Unlock(token string) error { + return s.state.ItemUnlock(Lock, token) +} + // Utilities for virtcontainers // SandboxConfigurationRoot returns a virtcontainers sandbox configuration root URL. From fad23ea54eab97d5ba6c292b70dab88051692252 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Fri, 21 Dec 2018 18:45:51 +0100 Subject: [PATCH 10/13] virtcontainers: Conversion to Stores We convert the whole virtcontainers code to use the store package instead of the resource_storage one. The resource_storage removal will happen in a separate change for a more logical split. This change is fairly big but mostly does not change the code logic. What really changes is when we create a store for a container or a sandbox. We now need to explictly do so instead of just assigning a filesystem{} instance. Other than that, the logic is kept intact. 
Fixes: #1099 Signed-off-by: Samuel Ortiz --- virtcontainers/api.go | 51 +-- virtcontainers/api_test.go | 88 ++--- virtcontainers/container.go | 103 +++-- virtcontainers/container_test.go | 42 ++- virtcontainers/factory/template/template.go | 5 +- virtcontainers/fc.go | 24 +- .../filesystem_resource_storage_test.go | 1 - virtcontainers/hyperstart_agent.go | 15 +- virtcontainers/hyperstart_agent_test.go | 15 +- virtcontainers/hypervisor.go | 3 +- virtcontainers/kata_agent.go | 13 +- virtcontainers/kata_agent_test.go | 21 +- virtcontainers/mock_hypervisor.go | 3 +- virtcontainers/mock_hypervisor_test.go | 5 +- virtcontainers/noop_agent_test.go | 2 + virtcontainers/proxy.go | 3 +- virtcontainers/proxy_test.go | 7 +- virtcontainers/qemu.go | 41 +- virtcontainers/qemu_arch_base_test.go | 3 +- virtcontainers/qemu_test.go | 52 ++- virtcontainers/sandbox.go | 207 ++++------- virtcontainers/sandbox_test.go | 351 +++--------------- virtcontainers/virtcontainers_test.go | 26 +- virtcontainers/vm.go | 12 +- 24 files changed, 408 insertions(+), 685 deletions(-) diff --git a/virtcontainers/api.go b/virtcontainers/api.go index 52caeb1d2b..8deded3c1f 100644 --- a/virtcontainers/api.go +++ b/virtcontainers/api.go @@ -14,6 +14,7 @@ import ( deviceApi "github.com/kata-containers/runtime/virtcontainers/device/api" deviceConfig "github.com/kata-containers/runtime/virtcontainers/device/config" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" specs "github.com/opencontainers/runtime-spec/specs-go" opentracing "github.com/opentracing/opentracing-go" @@ -142,7 +143,7 @@ func DeleteSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) { if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) // Fetch the sandbox from storage and create it. 
s, err := fetchSandbox(ctx, sandboxID) @@ -175,7 +176,7 @@ func FetchSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) { if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) // Fetch the sandbox from storage and create it. s, err := fetchSandbox(ctx, sandboxID) @@ -212,7 +213,7 @@ func StartSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) { if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) // Fetch the sandbox from storage and create it. s, err := fetchSandbox(ctx, sandboxID) @@ -244,7 +245,7 @@ func StopSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) { if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) // Fetch the sandbox from storage and create it. s, err := fetchSandbox(ctx, sandboxID) @@ -279,7 +280,7 @@ func RunSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(s.id, lockFile) // Start the sandbox err = s.Start() @@ -295,7 +296,7 @@ func ListSandbox(ctx context.Context) ([]SandboxStatus, error) { span, ctx := trace(ctx, "ListSandbox") defer span.Finish() - dir, err := os.Open(configStoragePath) + dir, err := os.Open(store.ConfigStoragePath) if err != nil { if os.IsNotExist(err) { // No sandbox directory is not an error @@ -338,11 +339,11 @@ func StatusSandbox(ctx context.Context, sandboxID string) (SandboxStatus, error) if err != nil { return SandboxStatus{}, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { - unlockSandbox(lockFile) + unlockSandbox(sandboxID, lockFile) return SandboxStatus{}, err } defer s.releaseStatelessSandbox() @@ -384,7 +385,7 @@ func CreateContainer(ctx context.Context, sandboxID string, containerConfig Cont if err != nil { 
return nil, nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -419,7 +420,7 @@ func DeleteContainer(ctx context.Context, sandboxID, containerID string) (VCCont if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -448,7 +449,7 @@ func StartContainer(ctx context.Context, sandboxID, containerID string) (VCConta if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -477,7 +478,7 @@ func StopContainer(ctx context.Context, sandboxID, containerID string) (VCContai if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -506,7 +507,7 @@ func EnterContainer(ctx context.Context, sandboxID, containerID string, cmd type if err != nil { return nil, nil, nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -540,11 +541,11 @@ func StatusContainer(ctx context.Context, sandboxID, containerID string) (Contai if err != nil { return ContainerStatus{}, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { - unlockSandbox(lockFile) + unlockSandbox(sandboxID, lockFile) return ContainerStatus{}, err } defer s.releaseStatelessSandbox() @@ -621,7 +622,7 @@ func KillContainer(ctx context.Context, sandboxID, containerID string, signal sy if err != nil { return err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -668,7 +669,7 @@ func ProcessListContainer(ctx context.Context, sandboxID, containerID string, op if 
err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -697,7 +698,7 @@ func UpdateContainer(ctx context.Context, sandboxID, containerID string, resourc if err != nil { return err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -726,7 +727,7 @@ func StatsContainer(ctx context.Context, sandboxID, containerID string) (Contain return ContainerStats{}, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -750,7 +751,7 @@ func togglePauseContainer(ctx context.Context, sandboxID, containerID string, pa if err != nil { return err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -794,7 +795,7 @@ func AddDevice(ctx context.Context, sandboxID string, info deviceConfig.DeviceIn if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -814,7 +815,7 @@ func toggleInterface(ctx context.Context, sandboxID string, inf *vcTypes.Interfa if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -858,7 +859,7 @@ func ListInterfaces(ctx context.Context, sandboxID string) ([]*vcTypes.Interface if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -882,7 +883,7 @@ func UpdateRoutes(ctx context.Context, sandboxID string, routes []*vcTypes.Route if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != 
nil { @@ -906,7 +907,7 @@ func ListRoutes(ctx context.Context, sandboxID string) ([]*vcTypes.Route, error) if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) s, err := fetchSandbox(ctx, sandboxID) if err != nil { diff --git a/virtcontainers/api_test.go b/virtcontainers/api_test.go index f7aebf7205..ec19e72acc 100644 --- a/virtcontainers/api_test.go +++ b/virtcontainers/api_test.go @@ -20,6 +20,7 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/kata-containers/runtime/virtcontainers/pkg/mock" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" @@ -195,7 +196,7 @@ func TestCreateSandboxNoopAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -240,7 +241,7 @@ func TestCreateSandboxHyperstartAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -281,7 +282,7 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -310,7 +311,7 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -355,7 +356,7 @@ func 
TestDeleteSandboxHyperstartAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -407,7 +408,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -427,7 +428,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) { func TestDeleteSandboxFailing(t *testing.T) { cleanUp() - sandboxDir := filepath.Join(configStoragePath, testSandboxID) + sandboxDir := store.SandboxConfigurationRootPath(testSandboxID) os.Remove(sandboxDir) p, err := DeleteSandbox(context.Background(), testSandboxID) @@ -527,7 +528,7 @@ func TestStartSandboxKataAgentSuccessful(t *testing.T) { func TestStartSandboxFailing(t *testing.T) { cleanUp() - sandboxDir := filepath.Join(configStoragePath, testSandboxID) + sandboxDir := store.SandboxConfigurationRootPath(testSandboxID) os.Remove(sandboxDir) p, err := StartSandbox(context.Background(), testSandboxID) @@ -694,7 +695,7 @@ func TestStopSandboxKataAgentSuccessful(t *testing.T) { func TestStopSandboxFailing(t *testing.T) { cleanUp() - sandboxDir := filepath.Join(configStoragePath, testSandboxID) + sandboxDir := store.SandboxConfigurationRootPath(testSandboxID) os.Remove(sandboxDir) p, err := StopSandbox(context.Background(), testSandboxID) @@ -713,7 +714,7 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -750,7 +751,7 @@ func TestRunSandboxHyperstartAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := 
store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -797,7 +798,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -823,8 +824,6 @@ func TestRunSandboxFailing(t *testing.T) { func TestListSandboxSuccessful(t *testing.T) { cleanUp() - os.RemoveAll(configStoragePath) - config := newTestSandboxConfigNoop() ctx := context.Background() @@ -842,8 +841,6 @@ func TestListSandboxSuccessful(t *testing.T) { func TestListSandboxNoSandboxDirectory(t *testing.T) { cleanUp() - os.RemoveAll(configStoragePath) - _, err := ListSandbox(context.Background()) if err != nil { t.Fatal(fmt.Sprintf("unexpected ListSandbox error from non-existent sandbox directory: %v", err)) @@ -982,8 +979,7 @@ func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) { t.Fatal(err) } - path := filepath.Join(configStoragePath, p.ID()) - os.RemoveAll(path) + store.DeleteAll() globalSandboxList.removeSandbox(p.ID()) _, err = StatusSandbox(ctx, p.ID()) @@ -1003,10 +999,7 @@ func TestStatusPodSandboxFailingFetchSandboxState(t *testing.T) { t.Fatal(err) } - pImpl, ok := p.(*Sandbox) - assert.True(t, ok) - - os.RemoveAll(pImpl.configPath) + store.DeleteAll() globalSandboxList.removeSandbox(p.ID()) _, err = StatusSandbox(ctx, p.ID()) @@ -1039,7 +1032,7 @@ func TestCreateContainerSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1076,7 +1069,7 @@ func TestCreateContainerFailingNoSandbox(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err == nil { t.Fatal() @@ -1102,7 
+1095,7 @@ func TestDeleteContainerSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1135,10 +1128,7 @@ func TestDeleteContainerSuccessful(t *testing.T) { func TestDeleteContainerFailingNoSandbox(t *testing.T) { cleanUp() - sandboxDir := filepath.Join(configStoragePath, testSandboxID) contID := "100" - os.RemoveAll(sandboxDir) - c, err := DeleteContainer(context.Background(), testSandboxID, contID) if c != nil || err == nil { t.Fatal() @@ -1157,7 +1147,7 @@ func TestDeleteContainerFailingNoContainer(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1203,10 +1193,7 @@ func TestStartContainerNoopAgentSuccessful(t *testing.T) { func TestStartContainerFailingNoSandbox(t *testing.T) { cleanUp() - sandboxDir := filepath.Join(configStoragePath, testSandboxID) contID := "100" - os.RemoveAll(sandboxDir) - c, err := StartContainer(context.Background(), testSandboxID, contID) if c != nil || err == nil { t.Fatal() @@ -1225,7 +1212,7 @@ func TestStartContainerFailingNoContainer(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1249,7 +1236,7 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1426,10 +1413,7 @@ func TestStartStopSandboxHyperstartAgentSuccessfulWithDefaultNetwork(t *testing. 
func TestStopContainerFailingNoSandbox(t *testing.T) { cleanUp() - sandboxDir := filepath.Join(configStoragePath, testSandboxID) contID := "100" - os.RemoveAll(sandboxDir) - c, err := StopContainer(context.Background(), testSandboxID, contID) if c != nil || err == nil { t.Fatal() @@ -1448,7 +1432,7 @@ func TestStopContainerFailingNoContainer(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1611,11 +1595,7 @@ func TestEnterContainerHyperstartAgentSuccessful(t *testing.T) { func TestEnterContainerFailingNoSandbox(t *testing.T) { cleanUp() - - sandboxDir := filepath.Join(configStoragePath, testSandboxID) contID := "100" - os.RemoveAll(sandboxDir) - cmd := newBasicTestCmd() _, c, _, err := EnterContainer(context.Background(), testSandboxID, contID, cmd) @@ -1636,7 +1616,7 @@ func TestEnterContainerFailingNoContainer(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1696,7 +1676,7 @@ func TestStatusContainerSuccessful(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1748,7 +1728,7 @@ func TestStatusContainerStateReady(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1818,7 +1798,7 @@ func TestStatusContainerStateRunning(t *testing.T) { t.Fatal(err) } - sandboxDir := filepath.Join(configStoragePath, p.ID()) + sandboxDir := store.SandboxConfigurationRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err != nil { t.Fatal(err) @@ -1887,10 +1867,7 @@ 
func TestStatusContainerFailing(t *testing.T) { t.Fatal(err) } - pImpl, ok := p.(*Sandbox) - assert.True(t, ok) - - os.RemoveAll(pImpl.configPath) + store.DeleteAll() globalSandboxList.removeSandbox(p.ID()) _, err = StatusContainer(ctx, p.ID(), contID) @@ -1911,10 +1888,7 @@ func TestStatsContainerFailing(t *testing.T) { t.Fatal(err) } - pImpl, ok := p.(*Sandbox) - assert.True(t, ok) - - os.RemoveAll(pImpl.configPath) + store.DeleteAll() globalSandboxList.removeSandbox(p.ID()) _, err = StatsContainer(ctx, p.ID(), contID) @@ -1951,7 +1925,7 @@ func TestStatsContainer(t *testing.T) { pImpl, ok := p.(*Sandbox) assert.True(ok) - defer os.RemoveAll(pImpl.configPath) + defer store.DeleteAll() contConfig := newTestContainerConfigNoop(contID) _, c, err := CreateContainer(ctx, p.ID(), contConfig) @@ -1997,7 +1971,7 @@ func TestProcessListContainer(t *testing.T) { pImpl, ok := p.(*Sandbox) assert.True(ok) - defer os.RemoveAll(pImpl.configPath) + defer store.DeleteAll() contConfig := newTestContainerConfigNoop(contID) _, c, err := CreateContainer(ctx, p.ID(), contConfig) @@ -2087,7 +2061,7 @@ func createAndStartSandbox(ctx context.Context, config SandboxConfig) (sandbox V return nil, "", err } - sandboxDir = filepath.Join(configStoragePath, sandbox.ID()) + sandboxDir = store.SandboxConfigurationRootPath(sandbox.ID()) _, err = os.Stat(sandboxDir) if err != nil { return nil, "", err diff --git a/virtcontainers/container.go b/virtcontainers/container.go index 5e1cb96575..07c65594c6 100644 --- a/virtcontainers/container.go +++ b/virtcontainers/container.go @@ -26,6 +26,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/device/manager" + "github.com/kata-containers/runtime/virtcontainers/store" ) // https://github.com/torvalds/linux/blob/master/include/uapi/linux/major.h @@ -285,6 +286,8 @@ type Container struct { systemMountsInfo SystemMountsInfo ctx context.Context + + store *store.VCStore } // ID 
returns the container identifier string. @@ -343,23 +346,13 @@ func (c *Container) SetPid(pid int) error { func (c *Container) setStateBlockIndex(index int) error { c.state.BlockIndex = index - err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state) - if err != nil { - return err - } - - return nil + return c.storeState() } func (c *Container) setStateFstype(fstype string) error { c.state.Fstype = fstype - err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state) - if err != nil { - return err - } - - return nil + return c.storeState() } // GetAnnotations returns container's annotations @@ -367,35 +360,44 @@ func (c *Container) GetAnnotations() map[string]string { return c.config.Annotations } +// storeContainer stores a container config. +func (c *Container) storeContainer() error { + return c.store.Store(store.Configuration, *(c.config)) +} + func (c *Container) storeProcess() error { - return c.sandbox.storage.storeContainerProcess(c.sandboxID, c.id, c.process) + return c.store.Store(store.Process, c.process) } func (c *Container) storeMounts() error { - return c.sandbox.storage.storeContainerMounts(c.sandboxID, c.id, c.mounts) + return c.store.Store(store.Mounts, c.mounts) } -func (c *Container) fetchMounts() ([]Mount, error) { - return c.sandbox.storage.fetchContainerMounts(c.sandboxID, c.id) +func (c *Container) storeDevices() error { + return c.store.Store(store.DeviceIDs, c.devices) } -func (c *Container) storeDevices() error { - return c.sandbox.storage.storeContainerDevices(c.sandboxID, c.id, c.devices) +func (c *Container) storeState() error { + return c.store.Store(store.State, c.state) } -func (c *Container) fetchDevices() ([]ContainerDevice, error) { - return c.sandbox.storage.fetchContainerDevices(c.sandboxID, c.id) +func (c *Container) loadMounts() ([]Mount, error) { + var mounts []Mount + if err := c.store.Load(store.Mounts, &mounts); err != nil { + return []Mount{}, err + } + + 
return mounts, nil } -// storeContainer stores a container config. -func (c *Container) storeContainer() error { - fs := filesystem{} - err := fs.storeContainerResource(c.sandbox.id, c.id, configFileType, *(c.config)) - if err != nil { - return err +func (c *Container) loadDevices() ([]ContainerDevice, error) { + var devices []ContainerDevice + + if err := c.store.Load(store.DeviceIDs, &devices); err != nil { + return []ContainerDevice{}, err } - return nil + return devices, nil } // setContainerState sets both the in-memory and on-disk state of the @@ -410,7 +412,7 @@ func (c *Container) setContainerState(state types.StateString) error { c.state.State = state // update on-disk state - err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state) + err := c.store.Store(store.State, c.state) if err != nil { return err } @@ -418,21 +420,6 @@ func (c *Container) setContainerState(state types.StateString) error { return nil } -func (c *Container) createContainersDirs() error { - err := os.MkdirAll(c.runPath, dirMode) - if err != nil { - return err - } - - err = os.MkdirAll(c.configPath, dirMode) - if err != nil { - c.sandbox.storage.deleteContainerResources(c.sandboxID, c.id, nil) - return err - } - - return nil -} - func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, guestSharedDir string) (string, bool, error) { randBytes, err := utils.GenerateRandomBytes(8) if err != nil { @@ -612,8 +599,8 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err rootFs: contConfig.RootFs, config: &contConfig, sandbox: sandbox, - runPath: filepath.Join(runStoragePath, sandbox.id, contConfig.ID), - configPath: filepath.Join(configStoragePath, sandbox.id, contConfig.ID), + runPath: store.ContainerRuntimeRootPath(sandbox.id, contConfig.ID), + configPath: store.ContainerConfigurationRootPath(sandbox.id, contConfig.ID), containerPath: filepath.Join(sandbox.id, contConfig.ID), state: types.State{}, process: Process{}, @@ 
-621,17 +608,24 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err ctx: sandbox.ctx, } - state, err := c.sandbox.storage.fetchContainerState(c.sandboxID, c.id) + ctrStore, err := store.NewVCContainerStore(sandbox.ctx, c.sandboxID, c.id) + if err != nil { + return nil, err + } + + c.store = ctrStore + + state, err := c.store.LoadState() if err == nil { c.state = state } - process, err := c.sandbox.storage.fetchContainerProcess(c.sandboxID, c.id) - if err == nil { + var process Process + if err := c.store.Load(store.Process, &process); err == nil { c.process = process } - mounts, err := c.fetchMounts() + mounts, err := c.loadMounts() if err == nil { // restore mounts from disk c.mounts = mounts @@ -671,8 +665,8 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err } // Devices will be found in storage after create stage has completed. - // We fetch devices from storage at all other stages. - storedDevices, err := c.fetchDevices() + // We load devices from storage at all other stages. + storedDevices, err := c.loadDevices() if err == nil { c.devices = storedDevices } else { @@ -724,11 +718,6 @@ func (c *Container) checkBlockDeviceSupport() bool { // createContainer creates and start a container inside a Sandbox. It has to be // called only when a new container, not known by the sandbox, has to be created. func (c *Container) create() (err error) { - - if err = c.createContainersDirs(); err != nil { - return - } - // In case the container creation fails, the following takes care // of rolling back all the actions previously performed. defer func() { @@ -791,7 +780,7 @@ func (c *Container) delete() error { return err } - return c.sandbox.storage.deleteContainerResources(c.sandboxID, c.id, nil) + return c.store.Delete() } // checkSandboxRunning validates the container state. 
diff --git a/virtcontainers/container_test.go b/virtcontainers/container_test.go index 1a0bd7a4fb..e97d699215 100644 --- a/virtcontainers/container_test.go +++ b/virtcontainers/container_test.go @@ -20,6 +20,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/device/drivers" "github.com/kata-containers/runtime/virtcontainers/device/manager" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/stretchr/testify/assert" ) @@ -89,18 +90,23 @@ func TestContainerSandbox(t *testing.T) { func TestContainerRemoveDrive(t *testing.T) { sandbox := &Sandbox{ + ctx: context.Background(), id: "sandbox", devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil), - storage: &filesystem{}, } + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + assert.Nil(t, err) + + sandbox.store = vcStore + container := Container{ sandbox: sandbox, id: "testContainer", } container.state.Fstype = "" - err := container.removeDrive() + err = container.removeDrive() // hotplugRemoveDevice for hypervisor should not be called. // test should pass without a hypervisor created for the container's sandbox. 
@@ -121,11 +127,6 @@ func TestContainerRemoveDrive(t *testing.T) { assert.True(t, ok) err = device.Attach(devReceiver) assert.Nil(t, err) - err = sandbox.storage.createAllResources(context.Background(), sandbox) - if err != nil { - t.Fatal(err) - } - err = sandbox.storeSandboxDevices() assert.Nil(t, err) @@ -170,7 +171,7 @@ func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err } mntDir = filepath.Join(tmpDir, "rootfs") - err = os.Mkdir(mntDir, dirMode) + err = os.Mkdir(mntDir, store.DirMode) if err != nil { t.Fatalf("Error creating dir %s: %s", mntDir, err) } @@ -212,11 +213,10 @@ func TestContainerAddDriveDir(t *testing.T) { t.Fatalf("Error while setting up fake rootfs: %v, Skipping test", err) } - fs := &filesystem{} sandbox := &Sandbox{ + ctx: context.Background(), id: testSandboxID, devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil), - storage: fs, hypervisor: &mockHypervisor{}, agent: &noopAgent{}, config: &SandboxConfig{ @@ -226,6 +226,12 @@ func TestContainerAddDriveDir(t *testing.T) { }, } + defer store.DeleteAll() + + sandboxStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + assert.Nil(t, err) + sandbox.store = sandboxStore + contID := "100" container := Container{ sandbox: sandbox, @@ -233,23 +239,19 @@ func TestContainerAddDriveDir(t *testing.T) { rootFs: fakeRootfs, } - // create state file - path := filepath.Join(runStoragePath, testSandboxID, container.ID()) - err = os.MkdirAll(path, dirMode) - if err != nil { - t.Fatal(err) - } - - defer os.RemoveAll(path) + containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id) + assert.Nil(t, err) + container.store = containerStore - stateFilePath := filepath.Join(path, stateFile) + // create state file + path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) + stateFilePath := filepath.Join(path, store.StateFile) os.Remove(stateFilePath) _, err = os.Create(stateFilePath) if err != nil { t.Fatal(err) } - defer 
os.Remove(stateFilePath) // Make the checkStorageDriver func variable point to a fake check function savedFunc := checkStorageDriver diff --git a/virtcontainers/factory/template/template.go b/virtcontainers/factory/template/template.go index 979e91dc66..968b61c28c 100644 --- a/virtcontainers/factory/template/template.go +++ b/virtcontainers/factory/template/template.go @@ -16,6 +16,7 @@ import ( vc "github.com/kata-containers/runtime/virtcontainers" "github.com/kata-containers/runtime/virtcontainers/factory/base" "github.com/kata-containers/runtime/virtcontainers/factory/direct" + "github.com/kata-containers/runtime/virtcontainers/store" ) type template struct { @@ -29,7 +30,7 @@ var templateWaitForAgent = 2 * time.Second // Fetch finds and returns a pre-built template factory. // TODO: save template metadata and fetch from storage. func Fetch(config vc.VMConfig) (base.FactoryBase, error) { - statePath := vc.RunVMStoragePath + "/template" + statePath := store.RunVMStoragePath + "/template" t := &template{statePath, config} err := t.checkTemplateVM() @@ -42,7 +43,7 @@ func Fetch(config vc.VMConfig) (base.FactoryBase, error) { // New creates a new VM template factory. 
func New(ctx context.Context, config vc.VMConfig) base.FactoryBase { - statePath := vc.RunVMStoragePath + "/template" + statePath := store.RunVMStoragePath + "/template" t := &template{statePath, config} err := t.prepareTemplateFiles() diff --git a/virtcontainers/fc.go b/virtcontainers/fc.go index 203add79de..9111fff653 100644 --- a/virtcontainers/fc.go +++ b/virtcontainers/fc.go @@ -8,6 +8,7 @@ package virtcontainers import ( "context" "fmt" + "net/url" "os/exec" "path/filepath" @@ -21,6 +22,7 @@ import ( "github.com/sirupsen/logrus" "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "net" @@ -97,7 +99,7 @@ type firecracker struct { fcClient *client.Firecracker //Tracks the current active connection socketPath string - storage resourceStorage + store *store.VCStore config HypervisorConfig pendingDevices []firecrackerDevice // Devices to be added when the FC API is ready ctx context.Context @@ -129,7 +131,7 @@ func (fc *firecracker) trace(name string) (opentracing.Span, context.Context) { // For firecracker this call only sets the internal structure up. // The sandbox will be created and started through startSandbox(). 
-func (fc *firecracker) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error { +func (fc *firecracker) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error { fc.ctx = ctx span, _ := fc.trace("createSandbox") @@ -138,14 +140,14 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, hypervisorC //TODO: check validity of the hypervisor config provided //https://github.com/kata-containers/runtime/issues/1065 fc.id = id - fc.socketPath = filepath.Join(runStoragePath, fc.id, fireSocket) - fc.storage = storage + fc.socketPath = filepath.Join(store.SandboxRuntimeRootPath(fc.id), fireSocket) + fc.store = vcStore fc.config = *hypervisorConfig fc.state.set(notReady) // No need to return an error from there since there might be nothing // to fetch if this is the first time the hypervisor is created. - if err := fc.storage.fetchHypervisorState(fc.id, &fc.info); err != nil { + if err := fc.store.Load(store.Hypervisor, &fc.info); err != nil { fc.Logger().WithField("function", "init").WithError(err).Info("No info could be fetched") } @@ -246,7 +248,7 @@ func (fc *firecracker) fcInit(timeout int) error { fc.state.set(apiReady) // Store VMM information - return fc.storage.storeHypervisorState(fc.id, fc.info) + return fc.store.Store(store.Hypervisor, fc.info) } func (fc *firecracker) client() *client.Firecracker { @@ -395,7 +397,13 @@ func (fc *firecracker) createDiskPool() error { isRootDevice := false // Create a temporary file as a placeholder backend for the drive - hostPath, err := fc.storage.createSandboxTempFile(fc.id) + hostURL, err := fc.store.Raw("") + if err != nil { + return err + } + + // We get a full URL from Raw(), we need to parse it. 
+ u, err := url.Parse(hostURL) if err != nil { return err } @@ -404,7 +412,7 @@ func (fc *firecracker) createDiskPool() error { DriveID: &driveID, IsReadOnly: &isReadOnly, IsRootDevice: &isRootDevice, - PathOnHost: &hostPath, + PathOnHost: &u.Path, } driveParams.SetBody(drive) _, err = fc.client().Operations.PutGuestDriveByID(driveParams) diff --git a/virtcontainers/filesystem_resource_storage_test.go b/virtcontainers/filesystem_resource_storage_test.go index 0090f00d8b..cbecc62971 100644 --- a/virtcontainers/filesystem_resource_storage_test.go +++ b/virtcontainers/filesystem_resource_storage_test.go @@ -34,7 +34,6 @@ func TestFilesystemCreateAllResourcesSuccessful(t *testing.T) { sandbox := &Sandbox{ ctx: context.Background(), id: testSandboxID, - storage: fs, config: sandboxConfig, devManager: manager.NewDeviceManager(manager.VirtioBlock, nil), containers: map[string]*Container{}, diff --git a/virtcontainers/hyperstart_agent.go b/virtcontainers/hyperstart_agent.go index 8a0693a65a..da6b9ecb69 100644 --- a/virtcontainers/hyperstart_agent.go +++ b/virtcontainers/hyperstart_agent.go @@ -23,6 +23,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart" ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -46,8 +47,8 @@ type HyperConfig struct { func (h *hyper) generateSockets(sandbox *Sandbox, c HyperConfig) { sandboxSocketPaths := []string{ - fmt.Sprintf(defaultSockPathTemplates[0], runStoragePath, sandbox.id), - fmt.Sprintf(defaultSockPathTemplates[1], runStoragePath, sandbox.id), + fmt.Sprintf(defaultSockPathTemplates[0], store.RunStoragePath, sandbox.id), + fmt.Sprintf(defaultSockPathTemplates[1], store.RunStoragePath, 
sandbox.id), } if c.SockCtlName != "" { @@ -289,7 +290,7 @@ func (h *hyper) init(ctx context.Context, sandbox *Sandbox, config interface{}) } // Fetch agent runtime info. - if err := sandbox.storage.fetchAgentState(sandbox.id, &h.state); err != nil { + if err := sandbox.store.Load(store.Agent, &h.state); err != nil { h.Logger().Debug("Could not retrieve anything from storage") } @@ -297,7 +298,7 @@ func (h *hyper) init(ctx context.Context, sandbox *Sandbox, config interface{}) } func (h *hyper) getVMPath(id string) string { - return filepath.Join(runStoragePath, id) + return store.SandboxRuntimeRootPath(id) } func (h *hyper) getSharePath(id string) string { @@ -319,7 +320,7 @@ func (h *hyper) configure(hv hypervisor, id, sharePath string, builtin bool, con HostPath: sharePath, } - if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil { + if err := os.MkdirAll(sharedVolume.HostPath, store.DirMode); err != nil { return err } @@ -487,7 +488,7 @@ func (h *hyper) stopSandbox(sandbox *Sandbox) error { h.state.ProxyPid = -1 h.state.URL = "" - if err := sandbox.storage.storeAgentState(sandbox.id, h.state); err != nil { + if err := sandbox.store.Store(store.Agent, h.state); err != nil { // ignore error h.Logger().WithError(err).WithField("sandbox", sandbox.id).Error("failed to clean up agent state") } @@ -990,7 +991,7 @@ func (h *hyper) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string) err h.state.ProxyPid = pid h.state.URL = url if sandbox != nil { - if err := sandbox.storage.storeAgentState(sandbox.id, h.state); err != nil { + if err := sandbox.store.Store(store.Agent, h.state); err != nil { return err } } diff --git a/virtcontainers/hyperstart_agent_test.go b/virtcontainers/hyperstart_agent_test.go index 06c7a782f9..69d75e3b73 100644 --- a/virtcontainers/hyperstart_agent_test.go +++ b/virtcontainers/hyperstart_agent_test.go @@ -14,6 +14,7 @@ import ( "testing" "github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart" + 
"github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/stretchr/testify/assert" "github.com/vishvananda/netlink" @@ -73,13 +74,13 @@ func TestHyperstartGenerateSocketsSuccessfulNoPathProvided(t *testing.T) { { DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 0), ID: fmt.Sprintf(defaultIDTemplate, 0), - HostPath: fmt.Sprintf(defaultSockPathTemplates[0], runStoragePath, sandbox.id), + HostPath: fmt.Sprintf(defaultSockPathTemplates[0], store.RunStoragePath, sandbox.id), Name: fmt.Sprintf(defaultChannelTemplate, 0), }, { DeviceID: fmt.Sprintf(defaultDeviceIDTemplate, 1), ID: fmt.Sprintf(defaultIDTemplate, 1), - HostPath: fmt.Sprintf(defaultSockPathTemplates[1], runStoragePath, sandbox.id), + HostPath: fmt.Sprintf(defaultSockPathTemplates[1], store.RunStoragePath, sandbox.id), Name: fmt.Sprintf(defaultChannelTemplate, 1), }, } @@ -247,13 +248,15 @@ func TestHyperSetProxy(t *testing.T) { h := &hyper{} p := &ccProxy{} s := &Sandbox{ - storage: &filesystem{ctx: context.Background()}, + ctx: context.Background(), } - err := h.setProxy(s, p, 0, "") - assert.Error(err) + vcStore, err := store.NewVCSandboxStore(s.ctx, "foobar") + assert.Nil(err) + + s.store = vcStore - err = h.setProxy(s, p, 0, "foobar") + err = h.setProxy(s, p, 0, "") assert.Error(err) } diff --git a/virtcontainers/hypervisor.go b/virtcontainers/hypervisor.go index 23cf82343d..dd37f8706d 100644 --- a/virtcontainers/hypervisor.go +++ b/virtcontainers/hypervisor.go @@ -15,6 +15,7 @@ import ( "strings" "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" ) @@ -591,7 +592,7 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) { // hypervisor is the virtcontainers hypervisor interface. // The default hypervisor implementation is Qemu. 
type hypervisor interface { - createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error + createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, store *store.VCStore) error startSandbox(timeout int) error stopSandbox() error pauseSandbox() error diff --git a/virtcontainers/kata_agent.go b/virtcontainers/kata_agent.go index 3f725ac87b..dcf4735d47 100644 --- a/virtcontainers/kata_agent.go +++ b/virtcontainers/kata_agent.go @@ -27,6 +27,7 @@ import ( ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" opentracing "github.com/opentracing/opentracing-go" @@ -131,7 +132,7 @@ func (k *kataAgent) Logger() *logrus.Entry { } func (k *kataAgent) getVMPath(id string) string { - return filepath.Join(RunVMStoragePath, id) + return filepath.Join(store.RunVMStoragePath, id) } func (k *kataAgent) getSharePath(id string) string { @@ -193,7 +194,7 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config interface k.proxyBuiltIn = isProxyBuiltIn(sandbox.config.ProxyType) // Fetch agent runtime info. 
- if err := sandbox.storage.fetchAgentState(sandbox.id, &k.state); err != nil { + if err := sandbox.store.Load(store.Agent, &k.state); err != nil { k.Logger().Debug("Could not retrieve anything from storage") } @@ -272,7 +273,7 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool, HostPath: sharePath, } - if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil { + if err := os.MkdirAll(sharedVolume.HostPath, store.DirMode); err != nil { return err } @@ -578,7 +579,7 @@ func (k *kataAgent) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string) k.state.ProxyPid = pid k.state.URL = url if sandbox != nil { - if err := sandbox.storage.storeAgentState(sandbox.id, k.state); err != nil { + if err := sandbox.store.Store(store.Agent, k.state); err != nil { return err } } @@ -696,7 +697,7 @@ func (k *kataAgent) stopSandbox(sandbox *Sandbox) error { // clean up agent state k.state.ProxyPid = -1 k.state.URL = "" - if err := sandbox.storage.storeAgentState(sandbox.id, k.state); err != nil { + if err := sandbox.store.Store(store.Agent, k.state); err != nil { // ignore error k.Logger().WithError(err).WithField("sandbox", sandbox.id).Error("failed to clean up agent state") } @@ -1799,7 +1800,7 @@ func (k *kataAgent) copyFile(src, dst string) error { cpReq := &grpc.CopyFileRequest{ Path: dst, - DirMode: uint32(dirMode), + DirMode: uint32(store.DirMode), FileMode: st.Mode, FileSize: fileSize, Uid: int32(st.Uid), diff --git a/virtcontainers/kata_agent_test.go b/virtcontainers/kata_agent_test.go index 85c8493612..5a55cbd7e9 100644 --- a/virtcontainers/kata_agent_test.go +++ b/virtcontainers/kata_agent_test.go @@ -6,6 +6,7 @@ package virtcontainers import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -20,7 +21,6 @@ import ( gpb "github.com/gogo/protobuf/types" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" - "golang.org/x/net/context" "google.golang.org/grpc" aTypes 
"github.com/kata-containers/agent/pkg/types" @@ -32,6 +32,7 @@ import ( vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" "github.com/kata-containers/runtime/virtcontainers/pkg/mock" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" ) @@ -736,9 +737,13 @@ func TestAgentCreateContainer(t *testing.T) { }, }, hypervisor: &mockHypervisor{}, - storage: &filesystem{}, } + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + assert.Nil(err) + + sandbox.store = vcStore + container := &Container{ ctx: sandbox.ctx, id: "barfoo", @@ -839,15 +844,15 @@ func TestKataAgentSetProxy(t *testing.T) { p := &kataBuiltInProxy{} s := &Sandbox{ ctx: context.Background(), - storage: &filesystem{ - ctx: context.Background(), - }, + id: "foobar", } - err := k.setProxy(s, p, 0, "") - assert.Error(err) + vcStore, err := store.NewVCSandboxStore(s.ctx, s.id) + assert.Nil(err) + + s.store = vcStore - err = k.setProxy(s, p, 0, "foobar") + err = k.setProxy(s, p, 0, "") assert.Error(err) } diff --git a/virtcontainers/mock_hypervisor.go b/virtcontainers/mock_hypervisor.go index 1146c6f746..37dccaf901 100644 --- a/virtcontainers/mock_hypervisor.go +++ b/virtcontainers/mock_hypervisor.go @@ -9,6 +9,7 @@ import ( "context" "os" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" ) @@ -23,7 +24,7 @@ func (m *mockHypervisor) hypervisorConfig() HypervisorConfig { return HypervisorConfig{} } -func (m *mockHypervisor) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error { +func (m *mockHypervisor) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, store *store.VCStore) error { err := hypervisorConfig.valid() if err != nil { return err diff --git 
a/virtcontainers/mock_hypervisor_test.go b/virtcontainers/mock_hypervisor_test.go index 75fd59c801..34885c45f9 100644 --- a/virtcontainers/mock_hypervisor_test.go +++ b/virtcontainers/mock_hypervisor_test.go @@ -23,13 +23,12 @@ func TestMockHypervisorCreateSandbox(t *testing.T) { HypervisorPath: "", }, }, - storage: &filesystem{}, } ctx := context.Background() // wrong config - if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.storage); err == nil { + if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, nil); err == nil { t.Fatal() } @@ -39,7 +38,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) { HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor), } - if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil { + if err := m.createSandbox(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, nil); err != nil { t.Fatal(err) } } diff --git a/virtcontainers/noop_agent_test.go b/virtcontainers/noop_agent_test.go index 9018c700d7..ad471f4647 100644 --- a/virtcontainers/noop_agent_test.go +++ b/virtcontainers/noop_agent_test.go @@ -15,6 +15,8 @@ import ( ) func testCreateNoopContainer() (*Sandbox, *Container, error) { + cleanUp() + contID := "100" config := newTestSandboxConfigNoop() diff --git a/virtcontainers/proxy.go b/virtcontainers/proxy.go index 050980c3ea..2d79847af3 100644 --- a/virtcontainers/proxy.go +++ b/virtcontainers/proxy.go @@ -9,6 +9,7 @@ import ( "fmt" "path/filepath" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/sirupsen/logrus" ) @@ -146,7 +147,7 @@ func validateProxyConfig(proxyConfig ProxyConfig) error { func defaultProxyURL(id, socketType string) (string, error) { switch socketType { case SocketTypeUNIX: - socketPath := filepath.Join(runStoragePath, id, "proxy.sock") + socketPath := filepath.Join(store.SandboxRuntimeRootPath(id), "proxy.sock") return fmt.Sprintf("unix://%s", 
socketPath), nil case SocketTypeVSOCK: // TODO Build the VSOCK default URL diff --git a/virtcontainers/proxy_test.go b/virtcontainers/proxy_test.go index 4a396e9e9c..21cbf7d0d9 100644 --- a/virtcontainers/proxy_test.go +++ b/virtcontainers/proxy_test.go @@ -13,6 +13,7 @@ import ( "reflect" "testing" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) @@ -222,7 +223,7 @@ func testDefaultProxyURL(expectedURL string, socketType string, sandboxID string } func TestDefaultProxyURLUnix(t *testing.T) { - path := filepath.Join(runStoragePath, sandboxID, "proxy.sock") + path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock") socketPath := fmt.Sprintf("unix://%s", path) if err := testDefaultProxyURL(socketPath, SocketTypeUNIX, sandboxID); err != nil { @@ -237,7 +238,7 @@ func TestDefaultProxyURLVSock(t *testing.T) { } func TestDefaultProxyURLUnknown(t *testing.T) { - path := filepath.Join(runStoragePath, sandboxID, "proxy.sock") + path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock") socketPath := fmt.Sprintf("unix://%s", path) if err := testDefaultProxyURL(socketPath, "foobar", sandboxID); err == nil { @@ -261,7 +262,7 @@ func testProxyStart(t *testing.T, agent agent, proxy proxy) { } invalidPath := filepath.Join(tmpdir, "enoent") - expectedSocketPath := filepath.Join(runStoragePath, testSandboxID, "proxy.sock") + expectedSocketPath := filepath.Join(store.SandboxRuntimeRootPath(testSandboxID), "proxy.sock") expectedURI := fmt.Sprintf("unix://%s", expectedSocketPath) data := []testData{ diff --git a/virtcontainers/qemu.go b/virtcontainers/qemu.go index 487f91ba67..2220108385 100644 --- a/virtcontainers/qemu.go +++ b/virtcontainers/qemu.go @@ -23,6 +23,7 @@ import ( "github.com/sirupsen/logrus" "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/store" 
"github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" "golang.org/x/sys/unix" @@ -60,7 +61,7 @@ type QemuState struct { type qemu struct { id string - storage resourceStorage + store *store.VCStore config HypervisorConfig @@ -210,7 +211,7 @@ func (q *qemu) trace(name string) (opentracing.Span, context.Context) { } // setup sets the Qemu structure up. -func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error { +func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error { span, _ := q.trace("setup") defer span.Finish() @@ -220,7 +221,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage reso } q.id = id - q.storage = storage + q.store = vcStore q.config = *hypervisorConfig q.arch = newQemuArch(q.config) @@ -238,7 +239,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage reso q.nvdimmCount = 0 } - if err = q.storage.fetchHypervisorState(q.id, &q.state); err != nil { + if err = q.store.Load(store.Hypervisor, &q.state); err != nil { q.Logger().Debug("Creating bridges") q.state.Bridges = q.arch.bridges(q.config.DefaultBridges) @@ -249,11 +250,11 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, storage reso // The path might already exist, but in case of VM templating, // we have to create it since the sandbox has not created it yet. 
- if err = os.MkdirAll(filepath.Join(runStoragePath, id), dirMode); err != nil { + if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil { return err } - if err = q.storage.storeHypervisorState(q.id, q.state); err != nil { + if err = q.store.Store(store.Hypervisor, q.state); err != nil { return err } } @@ -308,7 +309,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) { } func (q *qemu) qmpSocketPath(id string) (string, error) { - return utils.BuildSocketPath(RunVMStoragePath, id, qmpSocket) + return utils.BuildSocketPath(store.RunVMStoragePath, id, qmpSocket) } func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) { @@ -416,14 +417,14 @@ func (q *qemu) setupTemplate(knobs *govmmQemu.Knobs, memory *govmmQemu.Memory) g } // createSandbox is the Hypervisor sandbox creation implementation for govmmQemu. -func (q *qemu) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error { +func (q *qemu) createSandbox(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, store *store.VCStore) error { // Save the tracing context q.ctx = ctx span, _ := q.trace("createSandbox") defer span.Finish() - if err := q.setup(id, hypervisorConfig, storage); err != nil { + if err := q.setup(id, hypervisorConfig, store); err != nil { return err } @@ -560,8 +561,8 @@ func (q *qemu) startSandbox(timeout int) error { q.fds = []*os.File{} }() - vmPath := filepath.Join(RunVMStoragePath, q.id) - err := os.MkdirAll(vmPath, dirMode) + vmPath := filepath.Join(store.RunVMStoragePath, q.id) + err := os.MkdirAll(vmPath, store.DirMode) if err != nil { return err } @@ -655,7 +656,7 @@ func (q *qemu) stopSandbox() error { } // cleanup vm path - dir := filepath.Join(RunVMStoragePath, q.id) + dir := filepath.Join(store.RunVMStoragePath, q.id) // If it's a symlink, remove both dir and the target. // This can happen when vm template links a sandbox to a vm. 
@@ -1020,7 +1021,7 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf return data, err } - return data, q.storage.storeHypervisorState(q.id, q.state) + return data, q.store.Store(store.Hypervisor, q.state) } func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) { @@ -1032,7 +1033,7 @@ func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int return data, err } - return data, q.storage.storeHypervisorState(q.id, q.state) + return data, q.store.Store(store.Hypervisor, q.state) } func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) { @@ -1112,12 +1113,12 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) { hotpluggedVCPUs++ if hotpluggedVCPUs == amount { // All vCPUs were hotplugged - return amount, q.storage.storeHypervisorState(q.id, q.state) + return amount, q.store.Store(store.Hypervisor, q.state) } } // All vCPUs were NOT hotplugged - if err := q.storage.storeHypervisorState(q.id, q.state); err != nil { + if err := q.store.Store(store.Hypervisor, q.state); err != nil { q.Logger().Errorf("failed to save hypervisor state after hotplug %d vCPUs: %v", hotpluggedVCPUs, err) } @@ -1137,7 +1138,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) { // get the last vCPUs and try to remove it cpu := q.state.HotpluggedVCPUs[len(q.state.HotpluggedVCPUs)-1] if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, cpu.ID); err != nil { - _ = q.storage.storeHypervisorState(q.id, q.state) + _ = q.store.Store(store.Hypervisor, q.state) return i, fmt.Errorf("failed to hotunplug CPUs, only %d CPUs were hotunplugged: %v", i, err) } @@ -1145,7 +1146,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) { q.state.HotpluggedVCPUs = q.state.HotpluggedVCPUs[:len(q.state.HotpluggedVCPUs)-1] } - return amount, q.storage.storeHypervisorState(q.id, q.state) + return amount, q.store.Store(store.Hypervisor, q.state) } func (q 
*qemu) hotplugMemory(memDev *memoryDevice, op operation) (int, error) { @@ -1217,7 +1218,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) { } q.state.HotpluggedMemory += memDev.sizeMB - return memDev.sizeMB, q.storage.storeHypervisorState(q.id, q.state) + return memDev.sizeMB, q.store.Store(store.Hypervisor, q.state) } func (q *qemu) pauseSandbox() error { @@ -1269,7 +1270,7 @@ func (q *qemu) getSandboxConsole(id string) (string, error) { span, _ := q.trace("getSandboxConsole") defer span.Finish() - return utils.BuildSocketPath(RunVMStoragePath, id, consoleSocket) + return utils.BuildSocketPath(store.RunVMStoragePath, id, consoleSocket) } func (q *qemu) saveSandbox() error { diff --git a/virtcontainers/qemu_arch_base_test.go b/virtcontainers/qemu_arch_base_test.go index 7dac7dc59f..2a5f551afe 100644 --- a/virtcontainers/qemu_arch_base_test.go +++ b/virtcontainers/qemu_arch_base_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" ) @@ -226,7 +227,7 @@ func TestQemuArchBaseAppendConsoles(t *testing.T) { assert := assert.New(t) qemuArchBase := newQemuArchBase() - path := filepath.Join(runStoragePath, sandboxID, consoleSocket) + path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), consoleSocket) expectedOut := []govmmQemu.Device{ govmmQemu.SerialDevice{ diff --git a/virtcontainers/qemu_test.go b/virtcontainers/qemu_test.go index 7b53371502..6c79a9297a 100644 --- a/virtcontainers/qemu_test.go +++ b/virtcontainers/qemu_test.go @@ -15,6 +15,7 @@ import ( "testing" govmmQemu "github.com/intel/govmm/qemu" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/stretchr/testify/assert" ) @@ -75,27 +76,33 @@ func TestQemuCreateSandbox(t *testing.T) { q := &qemu{} sandbox 
:= &Sandbox{ - id: "testSandbox", - storage: &filesystem{}, + ctx: context.Background(), + id: "testSandbox", config: &SandboxConfig{ HypervisorConfig: qemuConfig, }, } + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + if err != nil { + t.Fatal(err) + } + sandbox.store = vcStore + // Create the hypervisor fake binary testQemuPath := filepath.Join(testDir, testHypervisor) - _, err := os.Create(testQemuPath) + _, err = os.Create(testQemuPath) if err != nil { t.Fatalf("Could not create hypervisor file %s: %v", testQemuPath, err) } // Create parent dir path for hypervisor.json - parentDir := filepath.Join(runStoragePath, sandbox.id) - if err := os.MkdirAll(parentDir, dirMode); err != nil { + parentDir := store.SandboxConfigurationRootPath(sandbox.id) + if err := os.MkdirAll(parentDir, store.DirMode); err != nil { t.Fatalf("Could not create parent directory %s: %v", parentDir, err) } - if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil { + if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.store); err != nil { t.Fatal(err) } @@ -113,27 +120,33 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) { q := &qemu{} sandbox := &Sandbox{ - id: "testSandbox", - storage: &filesystem{}, + ctx: context.Background(), + id: "testSandbox", config: &SandboxConfig{ HypervisorConfig: qemuConfig, }, } + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + if err != nil { + t.Fatal(err) + } + sandbox.store = vcStore + // Create the hypervisor fake binary testQemuPath := filepath.Join(testDir, testHypervisor) - _, err := os.Create(testQemuPath) + _, err = os.Create(testQemuPath) if err != nil { t.Fatalf("Could not create hypervisor file %s: %v", testQemuPath, err) } // Ensure parent dir path for hypervisor.json does not exist. 
- parentDir := filepath.Join(runStoragePath, sandbox.id) + parentDir := store.SandboxConfigurationRootPath(sandbox.id) if err := os.RemoveAll(parentDir); err != nil { t.Fatal(err) } - if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.storage); err != nil { + if err := q.createSandbox(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.store); err != nil { t.Fatalf("Qemu createSandbox() is not expected to fail because of missing parent directory for storage: %v", err) } } @@ -291,7 +304,7 @@ func TestQemuGetSandboxConsole(t *testing.T) { ctx: context.Background(), } sandboxID := "testSandboxID" - expected := filepath.Join(RunVMStoragePath, sandboxID, consoleSocket) + expected := filepath.Join(store.RunVMStoragePath, sandboxID, consoleSocket) result, err := q.getSandboxConsole(sandboxID) if err != nil { @@ -367,14 +380,19 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) { assert := assert.New(t) qemuConfig := newQemuConfig() - fs := &filesystem{} q := &qemu{ - ctx: context.Background(), - config: qemuConfig, - storage: fs, + ctx: context.Background(), + id: "qemuTest", + config: qemuConfig, + } + + vcStore, err := store.NewVCSandboxStore(q.ctx, q.id) + if err != nil { + t.Fatal(err) } + q.store = vcStore - _, err := q.hotplugAddDevice(&memoryDevice{0, 128}, fsDev) + _, err = q.hotplugAddDevice(&memoryDevice{0, 128}, fsDev) assert.Error(err) _, err = q.hotplugRemoveDevice(&memoryDevice{0, 128}, fsDev) assert.Error(err) diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index 1a9e715db7..1e1e9fc6bf 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -11,7 +11,6 @@ import ( "io" "net" "os" - "path/filepath" "sync" "syscall" @@ -26,6 +25,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/drivers" deviceManager "github.com/kata-containers/runtime/virtcontainers/device/manager" vcTypes 
"github.com/kata-containers/runtime/virtcontainers/pkg/types" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" "github.com/vishvananda/netlink" @@ -137,64 +137,6 @@ func (sandboxConfig *SandboxConfig) valid() bool { return true } -const ( - // R/W lock - exclusiveLock = syscall.LOCK_EX - - // Read only lock - sharedLock = syscall.LOCK_SH -) - -// rLockSandbox locks the sandbox with a shared lock. -func rLockSandbox(sandboxID string) (*os.File, error) { - return lockSandbox(sandboxID, sharedLock) -} - -// rwLockSandbox locks the sandbox with an exclusive lock. -func rwLockSandbox(sandboxID string) (*os.File, error) { - return lockSandbox(sandboxID, exclusiveLock) -} - -// lock locks any sandbox to prevent it from being accessed by other processes. -func lockSandbox(sandboxID string, lockType int) (*os.File, error) { - if sandboxID == "" { - return nil, errNeedSandboxID - } - - fs := filesystem{} - sandboxlockFile, _, err := fs.sandboxURI(sandboxID, lockFileType) - if err != nil { - return nil, err - } - - lockFile, err := os.Open(sandboxlockFile) - if err != nil { - return nil, err - } - - if err := syscall.Flock(int(lockFile.Fd()), lockType); err != nil { - return nil, err - } - - return lockFile, nil -} - -// unlock unlocks any sandbox to allow it being accessed by other processes. -func unlockSandbox(lockFile *os.File) error { - if lockFile == nil { - return fmt.Errorf("lockFile cannot be empty") - } - - err := syscall.Flock(int(lockFile.Fd()), syscall.LOCK_UN) - if err != nil { - return err - } - - lockFile.Close() - - return nil -} - // Sandbox is composed of a set of containers and a runtime environment. // A Sandbox can be created, deleted, started, paused, stopped, listed, entered, and restored. 
type Sandbox struct { @@ -204,7 +146,7 @@ type Sandbox struct { factory Factory hypervisor hypervisor agent agent - storage resourceStorage + store *store.VCStore network Network monitor *monitor @@ -269,12 +211,7 @@ func (s *Sandbox) SetAnnotations(annotations map[string]string) error { s.config.Annotations[k] = v } - err := s.storage.storeSandboxResource(s.id, configFileType, *(s.config)) - if err != nil { - return err - } - - return nil + return s.store.Store(store.Configuration, *(s.config)) } // GetAnnotations returns sandbox's annotations @@ -476,7 +413,7 @@ func (s *Sandbox) getAndStoreGuestDetails() error { s.seccompSupported = guestDetailRes.AgentDetails.SupportsSeccomp } - if err = s.storage.storeSandboxResource(s.id, stateFileType, s.state); err != nil { + if err = s.store.Store(store.State, s.state); err != nil { return err } } @@ -503,14 +440,14 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac } // Fetch sandbox network to be able to access it from the sandbox structure. - networkNS, err := s.storage.fetchSandboxNetwork(s.id) - if err == nil { + var networkNS NetworkNamespace + if err := s.store.Load(store.Network, &networkNS); err == nil { s.networkNS = networkNS } - devices, err := s.storage.fetchSandboxDevices(s.id) + devices, err := s.store.LoadDevices() if err != nil { - s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("fetch sandbox device failed") + s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("load sandbox devices failed") } s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, devices) @@ -518,7 +455,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac // If it exists, this means this is a re-creation, i.e. // we don't need to talk to the guest's agent, but only // want to create the sandbox and its containers in memory. 
- state, err := s.storage.fetchSandboxState(s.id) + state, err := s.store.LoadState() if err == nil && state.State != "" { s.state = state return s, nil @@ -557,12 +494,11 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor factory: factory, hypervisor: hypervisor, agent: agent, - storage: &filesystem{}, config: &sandboxConfig, volumes: sandboxConfig.Volumes, containers: map[string]*Container{}, - runPath: filepath.Join(runStoragePath, sandboxConfig.ID), - configPath: filepath.Join(configStoragePath, sandboxConfig.ID), + runPath: store.SandboxRuntimeRootPath(sandboxConfig.ID), + configPath: store.SandboxConfigurationRootPath(sandboxConfig.ID), state: types.State{}, annotationsLock: &sync.RWMutex{}, wg: &sync.WaitGroup{}, @@ -572,6 +508,13 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor ctx: ctx, } + vcStore, err := store.NewVCSandboxStore(ctx, s.id) + if err != nil { + return nil, err + } + + s.store = vcStore + if err = globalSandboxList.addSandbox(s); err != nil { return nil, err } @@ -583,17 +526,13 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor } }() - if err = s.storage.createAllResources(ctx, s); err != nil { - return nil, err - } - defer func() { if err != nil { - s.storage.deleteSandboxResources(s.id, nil) + s.store.Delete() } }() - if err = s.hypervisor.createSandbox(ctx, s.id, &sandboxConfig.HypervisorConfig, s.storage); err != nil { + if err = s.hypervisor.createSandbox(ctx, s.id, &sandboxConfig.HypervisorConfig, s.store); err != nil { return nil, err } @@ -611,7 +550,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor } func (s *Sandbox) storeSandboxDevices() error { - return s.storage.storeSandboxDevices(s.id, s.devManager.GetAllDevices()) + return s.store.StoreDevices(s.devManager.GetAllDevices()) } // storeSandbox stores a sandbox config. 
@@ -619,13 +558,13 @@ func (s *Sandbox) storeSandbox() error { span, _ := s.trace("storeSandbox") defer span.Finish() - err := s.storage.storeSandboxResource(s.id, configFileType, *(s.config)) + err := s.store.Store(store.Configuration, *(s.config)) if err != nil { return err } - for id, container := range s.containers { - err = s.storage.storeContainerResource(s.id, id, configFileType, *(container.config)) + for _, container := range s.containers { + err = container.store.Store(store.Configuration, *(container.config)) if err != nil { return err } @@ -634,6 +573,40 @@ func (s *Sandbox) storeSandbox() error { return nil } +func rLockSandbox(sandboxID string) (string, error) { + store, err := store.NewVCSandboxStore(context.Background(), sandboxID) + if err != nil { + return "", err + } + + return store.RLock() +} + +func rwLockSandbox(sandboxID string) (string, error) { + store, err := store.NewVCSandboxStore(context.Background(), sandboxID) + if err != nil { + return "", err + } + + return store.Lock() +} + +func unlockSandbox(sandboxID, token string) error { + // If the store no longer exists, we won't be able to unlock. + // Creating a new store for locking an item that does not even exist + // does not make sense. + if !store.VCSandboxStoreExists(context.Background(), sandboxID) { + return nil + } + + store, err := store.NewVCSandboxStore(context.Background(), sandboxID) + if err != nil { + return err + } + + return store.Unlock(token) +} + // fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox. 
func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err error) { virtLog.Info("fetch sandbox") @@ -646,12 +619,17 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err return sandbox, err } - fs := filesystem{} - config, err := fs.fetchSandboxConfig(sandboxID) + // We're bootstrapping + vcStore, err := store.NewVCSandboxStore(context.Background(), sandboxID) if err != nil { return nil, err } + var config SandboxConfig + if err := vcStore.Load(store.Configuration, &config); err != nil { + return nil, err + } + // fetchSandbox is not suppose to create new sandbox VM. sandbox, err = createSandbox(ctx, config, nil) if err != nil { @@ -742,7 +720,7 @@ func (s *Sandbox) Delete() error { s.agent.cleanup(s.id) - return s.storage.deleteSandboxResources(s.id, nil) + return s.store.Delete() } func (s *Sandbox) startNetworkMonitor() error { @@ -812,7 +790,7 @@ func (s *Sandbox) createNetwork() error { } // Store the network - return s.storage.storeSandboxNetwork(s.id, s.networkNS) + return s.store.Store(store.Network, s.networkNS) } func (s *Sandbox) removeNetwork() error { @@ -880,7 +858,7 @@ func (s *Sandbox) AddInterface(inf *vcTypes.Interface) (*vcTypes.Interface, erro // Update the sandbox storage s.networkNS.Endpoints = append(s.networkNS.Endpoints, endpoint) - if err := s.storage.storeSandboxNetwork(s.id, s.networkNS); err != nil { + if err := s.store.Store(store.Network, s.networkNS); err != nil { return nil, err } @@ -898,7 +876,7 @@ func (s *Sandbox) RemoveInterface(inf *vcTypes.Interface) (*vcTypes.Interface, e return inf, err } s.networkNS.Endpoints = append(s.networkNS.Endpoints[:i], s.networkNS.Endpoints[i+1:]...) 
- if err := s.storage.storeSandboxNetwork(s.id, s.networkNS); err != nil { + if err := s.store.Store(store.Network, s.networkNS); err != nil { return inf, err } break @@ -969,7 +947,7 @@ func (s *Sandbox) startVM() error { return err } } - if err := s.storage.storeSandboxNetwork(s.id, s.networkNS); err != nil { + if err := s.store.Store(store.Network, s.networkNS); err != nil { return err } } @@ -1066,8 +1044,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro return nil, err } - err = s.storage.storeSandboxResource(s.id, configFileType, *(s.config)) - if err != nil { + if err := s.store.Store(store.Configuration, *(s.config)); err != nil { return nil, err } @@ -1151,8 +1128,7 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) { } // Store sandbox config - err = s.storage.storeSandboxResource(s.id, configFileType, *(s.config)) - if err != nil { + if err := s.store.Store(store.Configuration, *(s.config)); err != nil { return nil, err } @@ -1398,7 +1374,7 @@ func (s *Sandbox) setSandboxState(state types.StateString) error { s.state.State = state // update on-disk state - return s.storage.storeSandboxResource(s.id, stateFileType, s.state) + return s.store.Store(store.State, s.state) } func (s *Sandbox) pauseSetStates() error { @@ -1431,8 +1407,7 @@ func (s *Sandbox) getAndSetSandboxBlockIndex() (int, error) { s.state.BlockIndex++ // update on-disk state - err := s.storage.storeSandboxResource(s.id, stateFileType, s.state) - if err != nil { + if err := s.store.Store(store.State, s.state); err != nil { return -1, err } @@ -1445,8 +1420,7 @@ func (s *Sandbox) decrementSandboxBlockIndex() error { s.state.BlockIndex-- // update on-disk state - err := s.storage.storeSandboxResource(s.id, stateFileType, s.state) - if err != nil { + if err := s.store.Store(store.State, s.state); err != nil { return err } @@ -1459,7 +1433,7 @@ func (s *Sandbox) setSandboxPid(pid int) error { s.state.Pid = pid // update on-disk state 
- return s.storage.storeSandboxResource(s.id, stateFileType, s.state) + return s.store.Store(store.State, s.state) } func (s *Sandbox) setContainersState(state types.StateString) error { @@ -1476,32 +1450,7 @@ func (s *Sandbox) setContainersState(state types.StateString) error { return nil } -func (s *Sandbox) deleteContainerState(containerID string) error { - if containerID == "" { - return errNeedContainerID - } - - err := s.storage.deleteContainerResources(s.id, containerID, []sandboxResource{stateFileType}) - if err != nil { - return err - } - - return nil -} - -func (s *Sandbox) deleteContainersState() error { - for _, container := range s.config.Containers { - err := s.deleteContainerState(container.ID) - if err != nil { - return err - } - } - - return nil -} - -// togglePauseSandbox pauses a sandbox if pause is set to true, else it resumes -// it. +// togglePauseSandbox pauses a sandbox if pause is set to true, else it resumes it. func togglePauseSandbox(ctx context.Context, sandboxID string, pause bool) (*Sandbox, error) { span, ctx := trace(ctx, "togglePauseSandbox") defer span.Finish() @@ -1514,7 +1463,7 @@ func togglePauseSandbox(ctx context.Context, sandboxID string, pause bool) (*San if err != nil { return nil, err } - defer unlockSandbox(lockFile) + defer unlockSandbox(sandboxID, lockFile) // Fetch the sandbox from storage and create it. 
s, err := fetchSandbox(ctx, sandboxID) diff --git a/virtcontainers/sandbox_test.go b/virtcontainers/sandbox_test.go index 26c2979533..e5b361b035 100644 --- a/virtcontainers/sandbox_test.go +++ b/virtcontainers/sandbox_test.go @@ -24,6 +24,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/drivers" "github.com/kata-containers/runtime/virtcontainers/device/manager" "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "golang.org/x/sys/unix" ) @@ -179,92 +180,6 @@ func TestSandboxStatePausedReady(t *testing.T) { } } -func testSandboxDir(t *testing.T, resource sandboxResource, expected string) error { - fs := filesystem{} - _, dir, err := fs.sandboxURI(testSandboxID, resource) - if err != nil { - return err - } - - if dir != expected { - return fmt.Errorf("Unexpected sandbox directory %s vs %s", dir, expected) - } - - return nil -} - -func testSandboxFile(t *testing.T, resource sandboxResource, expected string) error { - fs := filesystem{} - file, _, err := fs.sandboxURI(testSandboxID, resource) - if err != nil { - return err - } - - if file != expected { - return fmt.Errorf("Unexpected sandbox file %s vs %s", file, expected) - } - - return nil -} - -func TestSandboxDirConfig(t *testing.T) { - err := testSandboxDir(t, configFileType, sandboxDirConfig) - if err != nil { - t.Fatal(err) - } -} - -func TestSandboxDirState(t *testing.T) { - err := testSandboxDir(t, stateFileType, sandboxDirState) - if err != nil { - t.Fatal(err) - } -} - -func TestSandboxDirLock(t *testing.T) { - err := testSandboxDir(t, lockFileType, sandboxDirLock) - if err != nil { - t.Fatal(err) - } -} - -func TestSandboxDirNegative(t *testing.T) { - fs := filesystem{} - _, _, err := fs.sandboxURI("", lockFileType) - if err == nil { - t.Fatal("Empty sandbox IDs should not be allowed") - } -} - -func TestSandboxFileConfig(t *testing.T) { - err := 
testSandboxFile(t, configFileType, sandboxFileConfig) - if err != nil { - t.Fatal(err) - } -} - -func TestSandboxFileState(t *testing.T) { - err := testSandboxFile(t, stateFileType, sandboxFileState) - if err != nil { - t.Fatal(err) - } -} - -func TestSandboxFileLock(t *testing.T) { - err := testSandboxFile(t, lockFileType, sandboxFileLock) - if err != nil { - t.Fatal(err) - } -} - -func TestSandboxFileNegative(t *testing.T) { - fs := filesystem{} - _, _, err := fs.sandboxURI("", lockFileType) - if err == nil { - t.Fatal("Empty sandbox IDs should not be allowed") - } -} - func testStateValid(t *testing.T, stateStr types.StateString, expected bool) { state := &types.State{ State: stateStr, @@ -619,174 +534,6 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) { } } -func TestSandboxSetSandboxStateFailingStoreSandboxResource(t *testing.T) { - fs := &filesystem{} - sandbox := &Sandbox{ - storage: fs, - } - - err := sandbox.setSandboxState(types.StateReady) - if err == nil { - t.Fatal() - } -} - -func TestSandboxSetContainersStateFailingEmptySandboxID(t *testing.T) { - sandbox := &Sandbox{ - storage: &filesystem{}, - } - - containers := map[string]*Container{ - "100": { - id: "100", - sandbox: sandbox, - }, - } - - sandbox.containers = containers - - err := sandbox.setContainersState(types.StateReady) - if err == nil { - t.Fatal() - } -} - -func TestSandboxDeleteContainerStateSuccessful(t *testing.T) { - contID := "100" - - fs := &filesystem{} - sandbox := &Sandbox{ - id: testSandboxID, - storage: fs, - } - - path := filepath.Join(runStoragePath, testSandboxID, contID) - err := os.MkdirAll(path, dirMode) - if err != nil { - t.Fatal(err) - } - - stateFilePath := filepath.Join(path, stateFile) - - os.Remove(stateFilePath) - - _, err = os.Create(stateFilePath) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(stateFilePath) - if err != nil { - t.Fatal(err) - } - - err = sandbox.deleteContainerState(contID) - if err != nil { - t.Fatal(err) - } - - _, err = 
os.Stat(stateFilePath) - if err == nil { - t.Fatal() - } -} - -func TestSandboxDeleteContainerStateFailingEmptySandboxID(t *testing.T) { - contID := "100" - - fs := &filesystem{} - sandbox := &Sandbox{ - storage: fs, - } - - err := sandbox.deleteContainerState(contID) - if err == nil { - t.Fatal() - } -} - -func TestSandboxDeleteContainersStateSuccessful(t *testing.T) { - var err error - - containers := []ContainerConfig{ - { - ID: "100", - }, - { - ID: "200", - }, - } - - sandboxConfig := &SandboxConfig{ - Containers: containers, - } - - fs := &filesystem{} - sandbox := &Sandbox{ - id: testSandboxID, - config: sandboxConfig, - storage: fs, - } - - for _, c := range containers { - path := filepath.Join(runStoragePath, testSandboxID, c.ID) - err = os.MkdirAll(path, dirMode) - if err != nil { - t.Fatal(err) - } - - stateFilePath := filepath.Join(path, stateFile) - - os.Remove(stateFilePath) - - _, err = os.Create(stateFilePath) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(stateFilePath) - if err != nil { - t.Fatal(err) - } - } - - err = sandbox.deleteContainersState() - if err != nil { - t.Fatal(err) - } - - for _, c := range containers { - stateFilePath := filepath.Join(runStoragePath, testSandboxID, c.ID, stateFile) - _, err = os.Stat(stateFilePath) - if err == nil { - t.Fatal() - } - } -} - -func TestSandboxDeleteContainersStateFailingEmptySandboxID(t *testing.T) { - containers := []ContainerConfig{ - { - ID: "100", - }, - } - - sandboxConfig := &SandboxConfig{ - Containers: containers, - } - - fs := &filesystem{} - sandbox := &Sandbox{ - config: sandboxConfig, - storage: fs, - } - - err := sandbox.deleteContainersState() - if err == nil { - t.Fatal() - } -} - func TestGetContainer(t *testing.T) { containerIDs := []string{"abc", "123", "xyz", "rgb"} containers := map[string]*Container{} @@ -837,8 +584,8 @@ func TestGetAllContainers(t *testing.T) { func TestSetAnnotations(t *testing.T) { sandbox := Sandbox{ + ctx: context.Background(), id: "abcxyz123", 
- storage: &filesystem{}, annotationsLock: &sync.RWMutex{}, config: &SandboxConfig{ Annotations: map[string]string{ @@ -847,6 +594,12 @@ func TestSetAnnotations(t *testing.T) { }, } + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + if err != nil { + t.Fatal(err) + } + sandbox.store = vcStore + keyAnnotation := "annotation2" valueAnnotation := "xyz" newAnnotations := map[string]string{ @@ -947,23 +700,27 @@ func TestContainerSetStateBlockIndex(t *testing.T) { } defer cleanUp() - fs := &filesystem{} - sandbox.storage = fs + sandboxStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + if err != nil { + t.Fatal(err) + } + sandbox.store = sandboxStore c := sandbox.GetContainer("100") if c == nil { t.Fatal() } + cImpl, ok := c.(*Container) + assert.True(t, ok) - path := filepath.Join(runStoragePath, testSandboxID, c.ID()) - err = os.MkdirAll(path, dirMode) + containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, c.ID()) if err != nil { t.Fatal(err) } + cImpl.store = containerStore - stateFilePath := filepath.Join(path, stateFile) - - os.Remove(stateFilePath) + path := store.ContainerRuntimeRootPath(testSandboxID, c.ID()) + stateFilePath := filepath.Join(path, store.StateFile) f, err := os.Create(stateFilePath) if err != nil { @@ -975,9 +732,6 @@ func TestContainerSetStateBlockIndex(t *testing.T) { Fstype: "vfs", } - cImpl, ok := c.(*Container) - assert.True(t, ok) - cImpl.state = state stateData := `{ @@ -992,11 +746,6 @@ func TestContainerSetStateBlockIndex(t *testing.T) { } f.Close() - _, err = os.Stat(stateFilePath) - if err != nil { - t.Fatal(err) - } - newIndex := 20 if err := cImpl.setStateBlockIndex(newIndex); err != nil { t.Fatal(err) @@ -1046,22 +795,27 @@ func TestContainerStateSetFstype(t *testing.T) { } defer cleanUp() - fs := &filesystem{} - sandbox.storage = fs + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + if err != nil { + t.Fatal(err) + } + sandbox.store = vcStore c := 
sandbox.GetContainer("100") if c == nil { t.Fatal() } + cImpl, ok := c.(*Container) + assert.True(t, ok) - path := filepath.Join(runStoragePath, testSandboxID, c.ID()) - err = os.MkdirAll(path, dirMode) + containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, c.ID()) if err != nil { t.Fatal(err) } + cImpl.store = containerStore - stateFilePath := filepath.Join(path, stateFile) - os.Remove(stateFilePath) + path := store.ContainerRuntimeRootPath(testSandboxID, c.ID()) + stateFilePath := filepath.Join(path, store.StateFile) f, err := os.Create(stateFilePath) if err != nil { @@ -1074,9 +828,6 @@ func TestContainerStateSetFstype(t *testing.T) { BlockIndex: 3, } - cImpl, ok := c.(*Container) - assert.True(t, ok) - cImpl.state = state stateData := `{ @@ -1092,11 +843,6 @@ func TestContainerStateSetFstype(t *testing.T) { } f.Close() - _, err = os.Stat(stateFilePath) - if err != nil { - t.Fatal(err) - } - newFstype := "ext4" if err := cImpl.setStateFstype(newFstype); err != nil { t.Fatal(err) @@ -1141,7 +887,7 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) { testDeviceBDFPath := "0000:00:1c.0" devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices") - err = os.MkdirAll(devicesDir, dirMode) + err = os.MkdirAll(devicesDir, store.DirMode) assert.Nil(t, err) deviceFile := filepath.Join(devicesDir, testDeviceBDFPath) @@ -1181,15 +927,16 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) { sandbox := Sandbox{ id: "100", containers: containers, - storage: &filesystem{}, hypervisor: &mockHypervisor{}, devManager: dm, ctx: context.Background(), } + store, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + assert.Nil(t, err) + sandbox.store = store + containers[c.id].sandbox = &sandbox - err = sandbox.storage.createAllResources(context.Background(), &sandbox) - assert.Nil(t, err, "Error while create all resources for sandbox") err = sandbox.storeSandboxDevices() assert.Nil(t, err, "Error while store sandbox devices %s", err) @@ -1554,7 +1301,6 @@ 
func TestContainerProcessIOStream(t *testing.T) { } func TestAttachBlockDevice(t *testing.T) { - fs := &filesystem{} hypervisor := &mockHypervisor{} hConfig := HypervisorConfig{ @@ -1567,12 +1313,15 @@ func TestAttachBlockDevice(t *testing.T) { sandbox := &Sandbox{ id: testSandboxID, - storage: fs, hypervisor: hypervisor, config: sconfig, ctx: context.Background(), } + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + assert.Nil(t, err) + sandbox.store = vcStore + contID := "100" container := Container{ sandbox: sandbox, @@ -1580,15 +1329,15 @@ func TestAttachBlockDevice(t *testing.T) { } // create state file - path := filepath.Join(runStoragePath, testSandboxID, container.ID()) - err := os.MkdirAll(path, dirMode) + path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) + err = os.MkdirAll(path, store.DirMode) if err != nil { t.Fatal(err) } defer os.RemoveAll(path) - stateFilePath := filepath.Join(path, stateFile) + stateFilePath := filepath.Join(path, store.StateFile) os.Remove(stateFilePath) _, err = os.Create(stateFilePath) @@ -1640,7 +1389,6 @@ func TestAttachBlockDevice(t *testing.T) { } func TestPreAddDevice(t *testing.T) { - fs := &filesystem{} hypervisor := &mockHypervisor{} hConfig := HypervisorConfig{ @@ -1655,13 +1403,16 @@ func TestPreAddDevice(t *testing.T) { // create a sandbox first sandbox := &Sandbox{ id: testSandboxID, - storage: fs, hypervisor: hypervisor, config: sconfig, devManager: dm, ctx: context.Background(), } + vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + assert.Nil(t, err) + sandbox.store = vcStore + contID := "100" container := Container{ sandbox: sandbox, @@ -1670,16 +1421,20 @@ func TestPreAddDevice(t *testing.T) { } container.state.State = types.StateReady + containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id) + assert.Nil(t, err) + container.store = containerStore + // create state file - path := filepath.Join(runStoragePath, testSandboxID, 
container.ID()) - err := os.MkdirAll(path, dirMode) + path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) + err = os.MkdirAll(path, store.DirMode) if err != nil { t.Fatal(err) } defer os.RemoveAll(path) - stateFilePath := filepath.Join(path, stateFile) + stateFilePath := filepath.Join(path, store.StateFile) os.Remove(stateFilePath) _, err = os.Create(stateFilePath) diff --git a/virtcontainers/virtcontainers_test.go b/virtcontainers/virtcontainers_test.go index adf143b0bf..77f4982990 100644 --- a/virtcontainers/virtcontainers_test.go +++ b/virtcontainers/virtcontainers_test.go @@ -14,6 +14,7 @@ import ( "path/filepath" "testing" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/sirupsen/logrus" ) @@ -46,12 +47,13 @@ var testHyperstartTtySocket = "" // the next test to run. func cleanUp() { globalSandboxList.removeSandbox(testSandboxID) + store.DeleteAll() for _, dir := range []string{testDir, defaultSharedDir} { os.RemoveAll(dir) - os.MkdirAll(dir, dirMode) + os.MkdirAll(dir, store.DirMode) } - os.Mkdir(filepath.Join(testDir, testBundle), dirMode) + os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode) _, err := os.Create(filepath.Join(testDir, testImage)) if err != nil { @@ -82,7 +84,7 @@ func TestMain(m *testing.M) { } fmt.Printf("INFO: Creating virtcontainers test directory %s\n", testDir) - err = os.MkdirAll(testDir, dirMode) + err = os.MkdirAll(testDir, store.DirMode) if err != nil { fmt.Println("Could not create test directories:", err) os.Exit(1) @@ -117,7 +119,7 @@ func TestMain(m *testing.M) { os.Exit(1) } - err = os.Mkdir(filepath.Join(testDir, testBundle), dirMode) + err = os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode) if err != nil { fmt.Println("Could not create test bundle directory:", err) os.RemoveAll(testDir) @@ -125,16 +127,16 @@ func TestMain(m *testing.M) { } // allow the tests to run without affecting the host system. 
- configStoragePath = filepath.Join(testDir, storagePathSuffix, "config") - runStoragePath = filepath.Join(testDir, storagePathSuffix, "run") + store.ConfigStoragePath = filepath.Join(testDir, store.StoragePathSuffix, "config") + store.RunStoragePath = filepath.Join(testDir, store.StoragePathSuffix, "run") // set now that configStoragePath has been overridden. - sandboxDirConfig = filepath.Join(configStoragePath, testSandboxID) - sandboxFileConfig = filepath.Join(configStoragePath, testSandboxID, configFile) - sandboxDirState = filepath.Join(runStoragePath, testSandboxID) - sandboxDirLock = filepath.Join(runStoragePath, testSandboxID) - sandboxFileState = filepath.Join(runStoragePath, testSandboxID, stateFile) - sandboxFileLock = filepath.Join(runStoragePath, testSandboxID, lockFileName) + sandboxDirConfig = filepath.Join(store.ConfigStoragePath, testSandboxID) + sandboxFileConfig = filepath.Join(store.ConfigStoragePath, testSandboxID, store.ConfigurationFile) + sandboxDirState = filepath.Join(store.RunStoragePath, testSandboxID) + sandboxDirLock = filepath.Join(store.RunStoragePath, testSandboxID) + sandboxFileState = filepath.Join(store.RunStoragePath, testSandboxID, store.StateFile) + sandboxFileLock = filepath.Join(store.RunStoragePath, testSandboxID, store.LockFile) testHyperstartCtlSocket = filepath.Join(testDir, "test_hyper.sock") testHyperstartTtySocket = filepath.Join(testDir, "test_tty.sock") diff --git a/virtcontainers/vm.go b/virtcontainers/vm.go index 43bffffe6c..b7af5138fb 100644 --- a/virtcontainers/vm.go +++ b/virtcontainers/vm.go @@ -12,6 +12,7 @@ import ( "time" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/sirupsen/logrus" ) @@ -118,7 +119,14 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { } }() - if err = hypervisor.createSandbox(ctx, id, &config.HypervisorConfig, &filesystem{}); err != nil { + vcStore, err := store.NewVCStore(ctx, + 
store.SandboxConfigurationRoot(id), + store.SandboxRuntimeRoot(id)) + if err != nil { + return nil, err + } + + if err = hypervisor.createSandbox(ctx, id, &config.HypervisorConfig, vcStore); err != nil { return nil, err } @@ -180,7 +188,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { } func buildVMSharePath(id string) string { - return filepath.Join(RunVMStoragePath, id, "shared") + return filepath.Join(store.RunVMStoragePath, id, "shared") } func (v *VM) logger() logrus.FieldLogger { From f8e7e308c38007b55dcfd09e53ccf321c6a7a3a0 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 9 Jan 2019 11:08:39 +0100 Subject: [PATCH 11/13] virtcontainers: Remove the resource storage original implementation Now that we converted the virtcontainers code to the store package, we can remove all the resource storage old code. Fixes: #1099 Signed-off-by: Samuel Ortiz --- virtcontainers/errors.go | 2 - virtcontainers/filesystem_resource_storage.go | 873 ------------------ .../filesystem_resource_storage_test.go | 566 ------------ virtcontainers/noop_resource_storage.go | 113 --- virtcontainers/noop_resource_storage_test.go | 188 ---- virtcontainers/resource_storage.go | 69 -- 6 files changed, 1811 deletions(-) delete mode 100644 virtcontainers/filesystem_resource_storage.go delete mode 100644 virtcontainers/filesystem_resource_storage_test.go delete mode 100644 virtcontainers/noop_resource_storage.go delete mode 100644 virtcontainers/noop_resource_storage_test.go delete mode 100644 virtcontainers/resource_storage.go diff --git a/virtcontainers/errors.go b/virtcontainers/errors.go index 4cca6548d4..774ce2e31e 100644 --- a/virtcontainers/errors.go +++ b/virtcontainers/errors.go @@ -14,8 +14,6 @@ var ( errNeedSandbox = errors.New("Sandbox must be specified") errNeedSandboxID = errors.New("Sandbox ID cannot be empty") errNeedContainerID = errors.New("Container ID cannot be empty") - errNeedFile = errors.New("File cannot be empty") errNeedState = errors.New("State 
cannot be empty") - errInvalidResource = errors.New("Invalid sandbox resource") errNoSuchContainer = errors.New("Container does not exist") ) diff --git a/virtcontainers/filesystem_resource_storage.go b/virtcontainers/filesystem_resource_storage.go deleted file mode 100644 index 1cfb3ee568..0000000000 --- a/virtcontainers/filesystem_resource_storage.go +++ /dev/null @@ -1,873 +0,0 @@ -// Copyright (c) 2016 Intel Corporation -// -// SPDX-License-Identifier: Apache-2.0 -// - -package virtcontainers - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/sirupsen/logrus" - - "github.com/kata-containers/runtime/virtcontainers/device/api" - "github.com/kata-containers/runtime/virtcontainers/device/config" - "github.com/kata-containers/runtime/virtcontainers/device/drivers" - "github.com/kata-containers/runtime/virtcontainers/types" -) - -// sandboxResource is an int representing a sandbox resource type. -// -// Note that some are specific to the sandbox itself and others can apply to -// sandboxes and containers. -type sandboxResource int - -const ( - // configFileType represents a configuration file type - configFileType sandboxResource = iota - - // stateFileType represents a state file type - stateFileType - - // networkFileType represents a network file type (sandbox only) - networkFileType - - // hypervisorFileType represents a hypervisor file type (sandbox only) - hypervisorFileType - - // agentFileType represents an agent file type (sandbox only) - agentFileType - - // processFileType represents a process file type - processFileType - - // lockFileType represents a lock file type (sandbox only) - lockFileType - - // mountsFileType represents a mount file type - mountsFileType - - // devicesFileType represents a device file type - devicesFileType - - // devicesIDFileType saves reference IDs to file, e.g. 
device IDs - devicesIDFileType -) - -// configFile is the file name used for every JSON sandbox configuration. -const configFile = "config.json" - -// stateFile is the file name storing a sandbox state. -const stateFile = "state.json" - -// networkFile is the file name storing a sandbox network. -const networkFile = "network.json" - -// hypervisorFile is the file name storing a hypervisor's state. -const hypervisorFile = "hypervisor.json" - -// agentFile is the file name storing an agent's state. -const agentFile = "agent.json" - -// processFile is the file name storing a container process. -const processFile = "process.json" - -// lockFile is the file name locking the usage of a sandbox. -const lockFileName = "lock" - -const mountsFile = "mounts.json" - -// devicesFile is the file name storing a container's devices. -const devicesFile = "devices.json" - -// dirMode is the permission bits used for creating a directory -const dirMode = os.FileMode(0750) | os.ModeDir - -// storagePathSuffix is the suffix used for all storage paths -// -// Note: this very brief path represents "virtcontainers". It is as -// terse as possible to minimise path length. -const storagePathSuffix = "vc" - -// sandboxPathSuffix is the suffix used for sandbox storage -const sandboxPathSuffix = "sbs" - -// vmPathSuffix is the suffix used for guest VMs. -const vmPathSuffix = "vm" - -// configStoragePath is the sandbox configuration directory. -// It will contain one config.json file for each created sandbox. -var configStoragePath = filepath.Join("/var/lib", storagePathSuffix, sandboxPathSuffix) - -// runStoragePath is the sandbox runtime directory. -// It will contain one state.json and one lock file for each created sandbox. -var runStoragePath = filepath.Join("/run", storagePathSuffix, sandboxPathSuffix) - -// RunVMStoragePath is the vm directory. -// It will contain all guest vm sockets and shared mountpoints. 
-var RunVMStoragePath = filepath.Join("/run", storagePathSuffix, vmPathSuffix) - -// filesystem is a resourceStorage interface implementation for a local filesystem. -type filesystem struct { - ctx context.Context -} - -// Logger returns a logrus logger appropriate for logging filesystem messages -func (fs *filesystem) Logger() *logrus.Entry { - return virtLog.WithFields(logrus.Fields{ - "subsystem": "storage", - "type": "filesystem", - }) -} - -func (fs *filesystem) trace(name string) (opentracing.Span, context.Context) { - if fs.ctx == nil { - fs.Logger().WithField("type", "bug").Error("trace called before context set") - fs.ctx = context.Background() - } - - span, ctx := opentracing.StartSpanFromContext(fs.ctx, name) - - span.SetTag("subsystem", "storage") - span.SetTag("type", "filesystem") - - return span, ctx -} - -func (fs *filesystem) createAllResources(ctx context.Context, sandbox *Sandbox) (err error) { - fs.ctx = ctx - - span, _ := fs.trace("createAllResources") - defer span.Finish() - - for _, resource := range []sandboxResource{stateFileType, configFileType} { - _, path, _ := fs.sandboxURI(sandbox.id, resource) - err = os.MkdirAll(path, dirMode) - if err != nil { - return err - } - } - - for _, container := range sandbox.containers { - for _, resource := range []sandboxResource{stateFileType, configFileType} { - _, path, _ := fs.containerURI(sandbox.id, container.id, resource) - err = os.MkdirAll(path, dirMode) - if err != nil { - fs.deleteSandboxResources(sandbox.id, nil) - return err - } - } - } - - sandboxlockFile, _, err := fs.sandboxURI(sandbox.id, lockFileType) - if err != nil { - fs.deleteSandboxResources(sandbox.id, nil) - return err - } - - _, err = os.Stat(sandboxlockFile) - if err != nil { - lockFile, err := os.Create(sandboxlockFile) - if err != nil { - fs.deleteSandboxResources(sandbox.id, nil) - return err - } - lockFile.Close() - } - - return nil -} - -func (fs *filesystem) storeFile(file string, data interface{}) error { - if file == "" 
{ - return errNeedFile - } - - f, err := os.Create(file) - if err != nil { - return err - } - defer f.Close() - - jsonOut, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("Could not marshall data: %s", err) - } - f.Write(jsonOut) - - return nil -} - -// createSandboxTempFile is used to create a temporary file under sandbox runtime storage path. -func (fs *filesystem) createSandboxTempFile(sandboxID string) (string, error) { - if sandboxID == "" { - return "", errNeedSandboxID - } - - dirPath := filepath.Join(runStoragePath, sandboxID, "tempDir") - err := os.MkdirAll(dirPath, dirMode) - if err != nil { - return "", err - } - - f, err := ioutil.TempFile(dirPath, "tmp-") - if err != nil { - return "", err - } - - return f.Name(), nil -} - -// storeDeviceIDFile is used to marshal and store device IDs to disk. -func (fs *filesystem) storeDeviceIDFile(file string, data interface{}) error { - if file == "" { - return errNeedFile - } - - f, err := os.Create(file) - if err != nil { - return err - } - defer f.Close() - - devices, ok := data.([]ContainerDevice) - if !ok { - return fmt.Errorf("Incorrect data type received, Expected []string") - } - - jsonOut, err := json.Marshal(devices) - if err != nil { - return fmt.Errorf("Could not marshal devices: %s", err) - } - - if _, err := f.Write(jsonOut); err != nil { - return err - } - - return nil -} - -// storeDeviceFile is used to provide custom marshalling for Device objects. -// Device is first marshalled into TypedDevice to include the type -// of the Device object. 
-func (fs *filesystem) storeDeviceFile(file string, data interface{}) error { - if file == "" { - return errNeedFile - } - - f, err := os.Create(file) - if err != nil { - return err - } - defer f.Close() - - devices, ok := data.([]api.Device) - if !ok { - return fmt.Errorf("Incorrect data type received, Expected []Device") - } - - var typedDevices []TypedDevice - for _, d := range devices { - tempJSON, _ := json.Marshal(d) - typedDevice := TypedDevice{ - Type: string(d.DeviceType()), - Data: tempJSON, - } - typedDevices = append(typedDevices, typedDevice) - } - - jsonOut, err := json.Marshal(typedDevices) - if err != nil { - return fmt.Errorf("Could not marshal devices: %s", err) - } - - if _, err := f.Write(jsonOut); err != nil { - return err - } - - return nil -} - -func (fs *filesystem) fetchFile(file string, resource sandboxResource, data interface{}) error { - if file == "" { - return errNeedFile - } - - fileData, err := ioutil.ReadFile(file) - if err != nil { - return err - } - - switch resource { - case devicesFileType: - devices, ok := data.(*[]api.Device) - if !ok { - return fmt.Errorf("Could not cast %v into *[]Device type", data) - } - - return fs.fetchDeviceFile(fileData, devices) - } - - return json.Unmarshal(fileData, data) -} - -// fetchDeviceFile is used for custom unmarshalling of device interface objects. 
-func (fs *filesystem) fetchDeviceFile(fileData []byte, devices *[]api.Device) error { - var typedDevices []TypedDevice - if err := json.Unmarshal(fileData, &typedDevices); err != nil { - return err - } - - var tempDevices []api.Device - for _, d := range typedDevices { - l := fs.Logger().WithField("device-type", d.Type) - l.Info("Device type found") - - switch d.Type { - case string(config.DeviceVFIO): - // TODO: remove dependency of drivers package - var device drivers.VFIODevice - if err := json.Unmarshal(d.Data, &device); err != nil { - return err - } - tempDevices = append(tempDevices, &device) - l.Infof("VFIO device unmarshalled [%v]", device) - - case string(config.DeviceBlock): - // TODO: remove dependency of drivers package - var device drivers.BlockDevice - if err := json.Unmarshal(d.Data, &device); err != nil { - return err - } - tempDevices = append(tempDevices, &device) - l.Infof("Block Device unmarshalled [%v]", device) - case string(config.DeviceGeneric): - // TODO: remove dependency of drivers package - var device drivers.GenericDevice - if err := json.Unmarshal(d.Data, &device); err != nil { - return err - } - tempDevices = append(tempDevices, &device) - l.Infof("Generic device unmarshalled [%v]", device) - - default: - return fmt.Errorf("Unknown device type, could not unmarshal") - } - } - - *devices = tempDevices - return nil -} - -// resourceNeedsContainerID determines if the specified -// sandboxResource needs a containerID. Since some sandboxResources can -// be used for both sandboxes and containers, it is necessary to specify -// whether the resource is being used in a sandbox-specific context using -// the sandboxSpecific parameter. 
-func resourceNeedsContainerID(sandboxSpecific bool, resource sandboxResource) bool { - - switch resource { - case lockFileType, networkFileType, hypervisorFileType, agentFileType, devicesFileType: - // sandbox-specific resources - return false - default: - return !sandboxSpecific - } -} - -func resourceName(resource sandboxResource) string { - switch resource { - case agentFileType: - return "agentFileType" - case configFileType: - return "configFileType" - case devicesFileType: - return "devicesFileType" - case devicesIDFileType: - return "devicesIDFileType" - case hypervisorFileType: - return "hypervisorFileType" - case lockFileType: - return "lockFileType" - case mountsFileType: - return "mountsFileType" - case networkFileType: - return "networkFileType" - case processFileType: - return "processFileType" - case stateFileType: - return "stateFileType" - default: - return "" - } -} - -func resourceDir(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource) (string, error) { - if sandboxID == "" { - return "", errNeedSandboxID - } - - if resourceNeedsContainerID(sandboxSpecific, resource) == true && containerID == "" { - return "", errNeedContainerID - } - - var path string - - switch resource { - case configFileType: - path = configStoragePath - break - case stateFileType, networkFileType, processFileType, lockFileType, mountsFileType, devicesFileType, devicesIDFileType, hypervisorFileType, agentFileType: - path = runStoragePath - break - default: - return "", errInvalidResource - } - - dirPath := filepath.Join(path, sandboxID, containerID) - - return dirPath, nil -} - -// If sandboxSpecific is true, the resource is being applied for an empty -// sandbox (meaning containerID may be blank). 
-// Note that this function defers determining if containerID can be -// blank to resourceDIR() -func (fs *filesystem) resourceURI(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource) (string, string, error) { - if sandboxID == "" { - return "", "", errNeedSandboxID - } - - var filename string - - dirPath, err := resourceDir(sandboxSpecific, sandboxID, containerID, resource) - if err != nil { - return "", "", err - } - - switch resource { - case configFileType: - filename = configFile - break - case stateFileType: - filename = stateFile - case networkFileType: - filename = networkFile - case hypervisorFileType: - filename = hypervisorFile - case agentFileType: - filename = agentFile - case processFileType: - filename = processFile - case lockFileType: - filename = lockFileName - break - case mountsFileType: - filename = mountsFile - break - case devicesFileType: - filename = devicesFile - break - case devicesIDFileType: - filename = devicesFile - break - default: - return "", "", errInvalidResource - } - - filePath := filepath.Join(dirPath, filename) - - return filePath, dirPath, nil -} - -func (fs *filesystem) containerURI(sandboxID, containerID string, resource sandboxResource) (string, string, error) { - if sandboxID == "" { - return "", "", errNeedSandboxID - } - - if containerID == "" { - return "", "", errNeedContainerID - } - - return fs.resourceURI(false, sandboxID, containerID, resource) -} - -func (fs *filesystem) sandboxURI(sandboxID string, resource sandboxResource) (string, string, error) { - return fs.resourceURI(true, sandboxID, "", resource) -} - -// commonResourceChecks performs basic checks common to both setting and -// getting a sandboxResource. 
-func (fs *filesystem) commonResourceChecks(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource) error { - if sandboxID == "" { - return errNeedSandboxID - } - - if resourceNeedsContainerID(sandboxSpecific, resource) == true && containerID == "" { - return errNeedContainerID - } - - switch resource { - case configFileType: - case stateFileType: - case networkFileType: - case hypervisorFileType: - case agentFileType: - case processFileType: - case mountsFileType: - case devicesFileType: - case devicesIDFileType: - default: - return errInvalidResource - } - - return nil -} - -func (fs *filesystem) storeSandboxAndContainerConfigResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error { - if resource != configFileType { - return errInvalidResource - } - - configFile, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, configFileType) - if err != nil { - return err - } - - return fs.storeFile(configFile, file) -} - -func (fs *filesystem) storeStateResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error { - if resource != stateFileType { - return errInvalidResource - } - - stateFile, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, stateFileType) - if err != nil { - return err - } - - return fs.storeFile(stateFile, file) -} - -func (fs *filesystem) storeNetworkResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error { - if resource != networkFileType { - return errInvalidResource - } - - // sandbox only resource - networkFile, _, err := fs.resourceURI(true, sandboxID, containerID, networkFileType) - if err != nil { - return err - } - - return fs.storeFile(networkFile, file) -} - -func (fs *filesystem) storeProcessResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error { - if resource != 
processFileType { - return errInvalidResource - } - - processFile, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, processFileType) - if err != nil { - return err - } - - return fs.storeFile(processFile, file) -} - -func (fs *filesystem) storeMountResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error { - if resource != mountsFileType { - return errInvalidResource - } - - mountsFile, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, mountsFileType) - if err != nil { - return err - } - - return fs.storeFile(mountsFile, file) -} - -func (fs *filesystem) storeDeviceResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error { - if resource != devicesFileType { - return errInvalidResource - } - - devicesFile, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, devicesFileType) - if err != nil { - return err - } - - return fs.storeDeviceFile(devicesFile, file) -} - -func (fs *filesystem) storeDevicesIDResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, file interface{}) error { - if resource != devicesIDFileType { - return errInvalidResource - } - - devicesFile, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, resource) - if err != nil { - return err - } - - return fs.storeDeviceIDFile(devicesFile, file) -} - -func (fs *filesystem) storeResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, data interface{}) error { - if err := fs.commonResourceChecks(sandboxSpecific, sandboxID, containerID, resource); err != nil { - return err - } - - switch file := data.(type) { - case SandboxConfig, ContainerConfig: - return fs.storeSandboxAndContainerConfigResource(sandboxSpecific, sandboxID, containerID, resource, file) - - case types.State: - return fs.storeStateResource(sandboxSpecific, sandboxID, containerID, resource, file) - - case 
NetworkNamespace: - return fs.storeNetworkResource(sandboxSpecific, sandboxID, containerID, resource, file) - - case Process: - return fs.storeProcessResource(sandboxSpecific, sandboxID, containerID, resource, file) - - case []Mount: - return fs.storeMountResource(sandboxSpecific, sandboxID, containerID, resource, file) - - case []api.Device: - return fs.storeDeviceResource(sandboxSpecific, sandboxID, containerID, resource, file) - case []ContainerDevice: - return fs.storeDevicesIDResource(sandboxSpecific, sandboxID, containerID, resource, file) - - default: - return fmt.Errorf("Invalid resource data type") - } -} - -func (fs *filesystem) fetchResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, data interface{}) error { - var span opentracing.Span - - if fs.ctx != nil { - span, _ = fs.trace("fetchResource") - defer span.Finish() - - span.SetTag("sandbox", sandboxID) - span.SetTag("container", containerID) - span.SetTag("resource", resourceName(resource)) - } - - if err := fs.commonResourceChecks(sandboxSpecific, sandboxID, containerID, resource); err != nil { - return err - } - - path, _, err := fs.resourceURI(sandboxSpecific, sandboxID, containerID, resource) - if err != nil { - return err - } - - return fs.fetchFile(path, resource, data) -} - -func (fs *filesystem) storeSandboxResource(sandboxID string, resource sandboxResource, data interface{}) error { - return fs.storeResource(true, sandboxID, "", resource, data) -} - -func (fs *filesystem) fetchSandboxConfig(sandboxID string) (SandboxConfig, error) { - var sandboxConfig SandboxConfig - - if err := fs.fetchResource(true, sandboxID, "", configFileType, &sandboxConfig); err != nil { - return SandboxConfig{}, err - } - - return sandboxConfig, nil -} - -func (fs *filesystem) fetchSandboxState(sandboxID string) (types.State, error) { - var state types.State - - if err := fs.fetchResource(true, sandboxID, "", stateFileType, &state); err != nil { - return types.State{}, err - } - - 
return state, nil -} - -func (fs *filesystem) fetchSandboxNetwork(sandboxID string) (NetworkNamespace, error) { - var networkNS NetworkNamespace - - if err := fs.fetchResource(true, sandboxID, "", networkFileType, &networkNS); err != nil { - return NetworkNamespace{}, err - } - - return networkNS, nil -} - -func (fs *filesystem) fetchSandboxDevices(sandboxID string) ([]api.Device, error) { - var devices []api.Device - if err := fs.fetchResource(true, sandboxID, "", devicesFileType, &devices); err != nil { - return []api.Device{}, err - } - return devices, nil -} - -func (fs *filesystem) storeSandboxDevices(sandboxID string, devices []api.Device) error { - return fs.storeSandboxResource(sandboxID, devicesFileType, devices) -} - -func (fs *filesystem) fetchHypervisorState(sandboxID string, state interface{}) error { - return fs.fetchResource(true, sandboxID, "", hypervisorFileType, state) -} - -func (fs *filesystem) fetchAgentState(sandboxID string, state interface{}) error { - return fs.fetchResource(true, sandboxID, "", agentFileType, state) -} - -func (fs *filesystem) storeSandboxNetwork(sandboxID string, networkNS NetworkNamespace) error { - return fs.storeSandboxResource(sandboxID, networkFileType, networkNS) -} - -func (fs *filesystem) storeHypervisorState(sandboxID string, state interface{}) error { - hypervisorFile, _, err := fs.resourceURI(true, sandboxID, "", hypervisorFileType) - if err != nil { - return err - } - - return fs.storeFile(hypervisorFile, state) -} - -func (fs *filesystem) storeAgentState(sandboxID string, state interface{}) error { - span, _ := fs.trace("storeAgentState") - defer span.Finish() - - agentFile, _, err := fs.resourceURI(true, sandboxID, "", agentFileType) - if err != nil { - return err - } - - return fs.storeFile(agentFile, state) -} - -func (fs *filesystem) deleteSandboxResources(sandboxID string, resources []sandboxResource) error { - if resources == nil { - resources = []sandboxResource{configFileType, stateFileType} - } - - 
for _, resource := range resources { - _, dir, err := fs.sandboxURI(sandboxID, resource) - if err != nil { - return err - } - - err = os.RemoveAll(dir) - if err != nil { - return err - } - } - - return nil -} - -func (fs *filesystem) storeContainerResource(sandboxID, containerID string, resource sandboxResource, data interface{}) error { - if sandboxID == "" { - return errNeedSandboxID - } - - if containerID == "" { - return errNeedContainerID - } - - return fs.storeResource(false, sandboxID, containerID, resource, data) -} - -func (fs *filesystem) fetchContainerConfig(sandboxID, containerID string) (ContainerConfig, error) { - var config ContainerConfig - - if err := fs.fetchResource(false, sandboxID, containerID, configFileType, &config); err != nil { - return ContainerConfig{}, err - } - - return config, nil -} - -func (fs *filesystem) fetchContainerState(sandboxID, containerID string) (types.State, error) { - var state types.State - - if err := fs.fetchResource(false, sandboxID, containerID, stateFileType, &state); err != nil { - return types.State{}, err - } - - return state, nil -} - -func (fs *filesystem) fetchContainerProcess(sandboxID, containerID string) (Process, error) { - var process Process - - if err := fs.fetchResource(false, sandboxID, containerID, processFileType, &process); err != nil { - return Process{}, err - } - - return process, nil -} - -func (fs *filesystem) storeContainerProcess(sandboxID, containerID string, process Process) error { - return fs.storeContainerResource(sandboxID, containerID, processFileType, process) -} - -func (fs *filesystem) fetchContainerMounts(sandboxID, containerID string) ([]Mount, error) { - var mounts []Mount - - if err := fs.fetchResource(false, sandboxID, containerID, mountsFileType, &mounts); err != nil { - return []Mount{}, err - } - - return mounts, nil -} - -func (fs *filesystem) fetchContainerDevices(sandboxID, containerID string) ([]ContainerDevice, error) { - var devices []ContainerDevice - - if err := 
fs.fetchResource(false, sandboxID, containerID, devicesIDFileType, &devices); err != nil { - return nil, err - } - - return devices, nil -} - -func (fs *filesystem) storeContainerMounts(sandboxID, containerID string, mounts []Mount) error { - return fs.storeContainerResource(sandboxID, containerID, mountsFileType, mounts) -} - -func (fs *filesystem) storeContainerDevices(sandboxID, containerID string, devices []ContainerDevice) error { - return fs.storeContainerResource(sandboxID, containerID, devicesIDFileType, devices) -} - -func (fs *filesystem) deleteContainerResources(sandboxID, containerID string, resources []sandboxResource) error { - if resources == nil { - resources = []sandboxResource{configFileType, stateFileType} - } - - for _, resource := range resources { - _, dir, err := fs.sandboxURI(sandboxID, resource) - if err != nil { - return err - } - - containerDir := filepath.Join(dir, containerID, "/") - - err = os.RemoveAll(containerDir) - if err != nil { - return err - } - } - - return nil -} diff --git a/virtcontainers/filesystem_resource_storage_test.go b/virtcontainers/filesystem_resource_storage_test.go deleted file mode 100644 index cbecc62971..0000000000 --- a/virtcontainers/filesystem_resource_storage_test.go +++ /dev/null @@ -1,566 +0,0 @@ -// Copyright (c) 2016 Intel Corporation -// -// SPDX-License-Identifier: Apache-2.0 -// - -package virtcontainers - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/kata-containers/runtime/virtcontainers/device/manager" - "github.com/kata-containers/runtime/virtcontainers/types" -) - -func TestFilesystemCreateAllResourcesSuccessful(t *testing.T) { - fs := &filesystem{} - - contConfigs := []ContainerConfig{ - {ID: "1"}, - {ID: "10"}, - {ID: "100"}, - } - - sandboxConfig := &SandboxConfig{ - Containers: contConfigs, - } - - sandbox := &Sandbox{ - ctx: context.Background(), - id: testSandboxID, - config: sandboxConfig, - devManager: 
manager.NewDeviceManager(manager.VirtioBlock, nil), - containers: map[string]*Container{}, - } - - if err := sandbox.fetchContainers(); err != nil { - t.Fatal(err) - } - - sandboxConfigPath := filepath.Join(configStoragePath, testSandboxID) - sandboxRunPath := filepath.Join(runStoragePath, testSandboxID) - - os.RemoveAll(sandboxConfigPath) - os.RemoveAll(sandboxRunPath) - - for _, container := range contConfigs { - configPath := filepath.Join(configStoragePath, testSandboxID, container.ID) - os.RemoveAll(configPath) - - runPath := filepath.Join(runStoragePath, testSandboxID, container.ID) - os.RemoveAll(runPath) - } - - err := fs.createAllResources(context.Background(), sandbox) - if err != nil { - t.Fatal(err) - } - - // Check resources - _, err = os.Stat(sandboxConfigPath) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(sandboxRunPath) - if err != nil { - t.Fatal(err) - } - - for _, container := range contConfigs { - configPath := filepath.Join(configStoragePath, testSandboxID, container.ID) - s, err := os.Stat(configPath) - if err != nil { - t.Fatal(err) - } - - // Check we created the dirs with the correct mode - if s.Mode() != dirMode { - t.Fatal(fmt.Errorf("dirmode [%v] != expected [%v]", s.Mode(), dirMode)) - } - - runPath := filepath.Join(runStoragePath, testSandboxID, container.ID) - s, err = os.Stat(runPath) - if err != nil { - t.Fatal(err) - } - - // Check we created the dirs with the correct mode - if s.Mode() != dirMode { - t.Fatal(fmt.Errorf("dirmode [%v] != expected [%v]", s.Mode(), dirMode)) - } - - } -} - -func TestFilesystemCreateAllResourcesFailingSandboxIDEmpty(t *testing.T) { - fs := &filesystem{} - - sandbox := &Sandbox{} - - err := fs.createAllResources(context.Background(), sandbox) - if err == nil { - t.Fatal() - } -} - -func TestFilesystemCreateAllResourcesFailingContainerIDEmpty(t *testing.T) { - fs := &filesystem{} - - containers := map[string]*Container{ - testContainerID: {}, - } - - sandbox := &Sandbox{ - id: testSandboxID, 
- containers: containers, - } - - err := fs.createAllResources(context.Background(), sandbox) - if err == nil { - t.Fatal() - } -} - -type TestNoopStructure struct { - Field1 string - Field2 string -} - -func TestFilesystemStoreFileSuccessfulNotExisting(t *testing.T) { - fs := &filesystem{} - - path := filepath.Join(testDir, "testFilesystem") - os.Remove(path) - - data := TestNoopStructure{ - Field1: "value1", - Field2: "value2", - } - - expected := "{\"Field1\":\"value1\",\"Field2\":\"value2\"}" - - err := fs.storeFile(path, data) - if err != nil { - t.Fatal(err) - } - - fileData, err := ioutil.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - if string(fileData) != expected { - t.Fatal() - } -} - -func TestFilesystemStoreFileSuccessfulExisting(t *testing.T) { - fs := &filesystem{} - - path := filepath.Join(testDir, "testFilesystem") - os.Remove(path) - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - f.Close() - - data := TestNoopStructure{ - Field1: "value1", - Field2: "value2", - } - - expected := "{\"Field1\":\"value1\",\"Field2\":\"value2\"}" - - err = fs.storeFile(path, data) - if err != nil { - t.Fatal(err) - } - - fileData, err := ioutil.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - if string(fileData) != expected { - t.Fatal() - } -} - -func TestFilesystemStoreFileFailingMarshalling(t *testing.T) { - fs := &filesystem{} - - path := filepath.Join(testDir, "testFilesystem") - os.Remove(path) - - data := make(chan bool) - - err := fs.storeFile(path, data) - if err == nil { - t.Fatal() - } -} - -func TestFilesystemFetchFileSuccessful(t *testing.T) { - fs := &filesystem{} - data := TestNoopStructure{} - - path := filepath.Join(testDir, "testFilesystem") - os.Remove(path) - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - - dataToWrite := "{\"Field1\":\"value1\",\"Field2\":\"value2\"}" - n, err := f.WriteString(dataToWrite) - if err != nil || n != len(dataToWrite) { - f.Close() - t.Fatal(err) - } - f.Close() - 
- err = fs.fetchFile(path, sandboxResource(-1), &data) - if err != nil { - t.Fatal(err) - } - - expected := TestNoopStructure{ - Field1: "value1", - Field2: "value2", - } - - if reflect.DeepEqual(data, expected) == false { - t.Fatal() - } -} - -func TestFilesystemFetchFileFailingNoFile(t *testing.T) { - fs := &filesystem{} - data := TestNoopStructure{} - - path := filepath.Join(testDir, "testFilesystem") - os.Remove(path) - - err := fs.fetchFile(path, sandboxResource(-1), &data) - if err == nil { - t.Fatal() - } -} - -func TestFilesystemFetchFileFailingUnMarshalling(t *testing.T) { - fs := &filesystem{} - data := TestNoopStructure{} - - path := filepath.Join(testDir, "testFilesystem") - os.Remove(path) - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - f.Close() - - err = fs.fetchFile(path, sandboxResource(-1), data) - if err == nil { - t.Fatal() - } -} - -func TestFilesystemFetchContainerConfigSuccessful(t *testing.T) { - fs := &filesystem{} - contID := "100" - rootFs := "rootfs" - - contConfigDir := filepath.Join(configStoragePath, testSandboxID, contID) - os.MkdirAll(contConfigDir, dirMode) - - path := filepath.Join(contConfigDir, configFile) - os.Remove(path) - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - - configData := fmt.Sprintf("{\"ID\":\"%s\",\"RootFs\":\"%s\"}", contID, rootFs) - n, err := f.WriteString(configData) - if err != nil || n != len(configData) { - f.Close() - t.Fatal(err) - } - f.Close() - - data, err := fs.fetchContainerConfig(testSandboxID, contID) - if err != nil { - t.Fatal(err) - } - - expected := ContainerConfig{ - ID: contID, - RootFs: rootFs, - } - - if reflect.DeepEqual(data, expected) == false { - t.Fatal() - } -} - -func TestFilesystemFetchContainerConfigFailingContIDEmpty(t *testing.T) { - fs := &filesystem{} - - _, err := fs.fetchContainerConfig(testSandboxID, "") - if err == nil { - t.Fatal() - } -} - -func TestFilesystemFetchContainerConfigFailingSandboxIDEmpty(t *testing.T) { - fs := 
&filesystem{} - - _, err := fs.fetchContainerConfig("", "100") - if err == nil { - t.Fatal() - } -} - -func TestFilesystemFetchContainerMountsSuccessful(t *testing.T) { - fs := &filesystem{} - contID := "100" - - contMountsDir := filepath.Join(runStoragePath, testSandboxID, contID) - os.MkdirAll(contMountsDir, dirMode) - - path := filepath.Join(contMountsDir, mountsFile) - os.Remove(path) - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - - source := "/dev/sda1" - dest := "/root" - mntType := "ext4" - options := "rw" - hostPath := "/tmp/root" - - mountData := fmt.Sprintf(` - [ - { - "Source":"%s", - "Destination":"%s", - "Type":"%s", - "Options": ["%s"], - "HostPath":"%s" - } - ] - `, source, dest, mntType, options, hostPath) - - n, err := f.WriteString(mountData) - if err != nil || n != len(mountData) { - f.Close() - t.Fatal(err) - } - f.Close() - - data, err := fs.fetchContainerMounts(testSandboxID, contID) - if err != nil { - data, _ := ioutil.ReadFile(path) - t.Logf("Data from file : %s", string(data[:])) - t.Fatal(err) - } - - expected := []Mount{ - { - Source: source, - Destination: dest, - Type: mntType, - Options: []string{"rw"}, - HostPath: hostPath, - }, - } - - if reflect.DeepEqual(data, expected) == false { - t.Fatalf("Expected : [%v]\n, Got : [%v]\n", expected, data) - } -} - -func TestFilesystemFetchContainerMountsInvalidType(t *testing.T) { - fs := &filesystem{} - contID := "100" - - contMountsDir := filepath.Join(runStoragePath, testSandboxID, contID) - os.MkdirAll(contMountsDir, dirMode) - - path := filepath.Join(contMountsDir, mountsFile) - os.Remove(path) - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - - configData := fmt.Sprintf("{\"ID\":\"%s\",\"RootFs\":\"rootfs\"}", contID) - n, err := f.WriteString(configData) - if err != nil || n != len(configData) { - f.Close() - t.Fatal(err) - } - f.Close() - - _, err = fs.fetchContainerMounts(testSandboxID, contID) - if err == nil { - t.Fatal() - } -} - -func 
TestFilesystemFetchContainerMountsFailingContIDEmpty(t *testing.T) { - fs := &filesystem{} - - _, err := fs.fetchContainerMounts(testSandboxID, "") - if err == nil { - t.Fatal() - } -} - -func TestFilesystemFetchContainerMountsFailingSandboxIDEmpty(t *testing.T) { - fs := &filesystem{} - - _, err := fs.fetchContainerMounts("", "100") - if err == nil { - t.Fatal() - } -} - -func TestFilesystemResourceDirFailingSandboxIDEmpty(t *testing.T) { - for _, b := range []bool{true, false} { - _, err := resourceDir(b, "", "", configFileType) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemResourceDirFailingInvalidResource(t *testing.T) { - for _, b := range []bool{true, false} { - _, err := resourceDir(b, testSandboxID, "100", sandboxResource(-1)) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemResourceURIFailingResourceDir(t *testing.T) { - fs := &filesystem{} - - for _, b := range []bool{true, false} { - _, _, err := fs.resourceURI(b, testSandboxID, "100", sandboxResource(-1)) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemStoreResourceFailingSandboxConfigStateFileType(t *testing.T) { - fs := &filesystem{} - data := SandboxConfig{} - - for _, b := range []bool{true, false} { - err := fs.storeResource(b, testSandboxID, "100", stateFileType, data) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemStoreResourceFailingContainerConfigStateFileType(t *testing.T) { - fs := &filesystem{} - data := ContainerConfig{} - - for _, b := range []bool{true, false} { - err := fs.storeResource(b, testSandboxID, "100", stateFileType, data) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemStoreResourceFailingSandboxConfigResourceURI(t *testing.T) { - fs := &filesystem{} - data := SandboxConfig{} - - for _, b := range []bool{true, false} { - err := fs.storeResource(b, "", "100", configFileType, data) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemStoreResourceFailingContainerConfigResourceURI(t *testing.T) 
{ - fs := &filesystem{} - data := ContainerConfig{} - - for _, b := range []bool{true, false} { - err := fs.storeResource(b, "", "100", configFileType, data) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemStoreResourceFailingStateConfigFileType(t *testing.T) { - fs := &filesystem{} - data := types.State{} - - for _, b := range []bool{true, false} { - err := fs.storeResource(b, testSandboxID, "100", configFileType, data) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemStoreResourceFailingStateResourceURI(t *testing.T) { - fs := &filesystem{} - data := types.State{} - - for _, b := range []bool{true, false} { - err := fs.storeResource(b, "", "100", stateFileType, data) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemStoreResourceFailingWrongDataType(t *testing.T) { - fs := &filesystem{} - data := TestNoopStructure{} - - for _, b := range []bool{true, false} { - err := fs.storeResource(b, testSandboxID, "100", configFileType, data) - if err == nil { - t.Fatal() - } - } -} - -func TestFilesystemFetchResourceFailingWrongResourceType(t *testing.T) { - fs := &filesystem{} - - for _, b := range []bool{true, false} { - if err := fs.fetchResource(b, testSandboxID, "100", lockFileType, nil); err == nil { - t.Fatal() - } - } -} diff --git a/virtcontainers/noop_resource_storage.go b/virtcontainers/noop_resource_storage.go deleted file mode 100644 index 89db7c7faf..0000000000 --- a/virtcontainers/noop_resource_storage.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// SPDX-License-Identifier: Apache-2.0 -// - -package virtcontainers - -import ( - "github.com/kata-containers/runtime/virtcontainers/device/api" - "github.com/kata-containers/runtime/virtcontainers/types" -) - -type noopResourceStorage struct{} - -func (n *noopResourceStorage) createAllResources(sandbox *Sandbox) error { - return nil -} - -func (n *noopResourceStorage) containerURI(sandboxID, containerID string, resource sandboxResource) 
(string, string, error) { - return "", "", nil -} - -func (n *noopResourceStorage) sandboxURI(sandboxID string, resource sandboxResource) (string, string, error) { - return "", "", nil -} - -func (n *noopResourceStorage) storeSandboxResource(sandboxID string, resource sandboxResource, data interface{}) error { - return nil -} - -func (n *noopResourceStorage) deleteSandboxResources(sandboxID string, resources []sandboxResource) error { - return nil -} - -func (n *noopResourceStorage) fetchSandboxConfig(sandboxID string) (SandboxConfig, error) { - return SandboxConfig{}, nil -} - -func (n *noopResourceStorage) fetchSandboxState(sandboxID string) (types.State, error) { - return types.State{}, nil -} - -func (n *noopResourceStorage) fetchSandboxNetwork(sandboxID string) (NetworkNamespace, error) { - return NetworkNamespace{}, nil -} - -func (n *noopResourceStorage) storeSandboxNetwork(sandboxID string, networkNS NetworkNamespace) error { - return nil -} - -func (n *noopResourceStorage) fetchSandboxDevices(sandboxID string) ([]api.Device, error) { - return []api.Device{}, nil -} - -func (n *noopResourceStorage) storeSandboxDevices(sandboxID string, devices []api.Device) error { - return nil -} - -func (n *noopResourceStorage) fetchHypervisorState(sandboxID string, state interface{}) error { - return nil -} - -func (n *noopResourceStorage) storeHypervisorState(sandboxID string, state interface{}) error { - return nil -} - -func (n *noopResourceStorage) fetchAgentState(sandboxID string, state interface{}) error { - return nil -} - -func (n *noopResourceStorage) storeAgentState(sandboxID string, state interface{}) error { - return nil -} - -func (n *noopResourceStorage) storeContainerResource(sandboxID, containerID string, resource sandboxResource, data interface{}) error { - return nil -} - -func (n *noopResourceStorage) deleteContainerResources(sandboxID, containerID string, resources []sandboxResource) error { - return nil -} - -func (n *noopResourceStorage) 
fetchContainerConfig(sandboxID, containerID string) (ContainerConfig, error) { - return ContainerConfig{}, nil -} - -func (n *noopResourceStorage) fetchContainerState(sandboxID, containerID string) (types.State, error) { - return types.State{}, nil -} - -func (n *noopResourceStorage) fetchContainerProcess(sandboxID, containerID string) (Process, error) { - return Process{}, nil -} - -func (n *noopResourceStorage) storeContainerProcess(sandboxID, containerID string, process Process) error { - return nil -} - -func (n *noopResourceStorage) fetchContainerMounts(sandboxID, containerID string) ([]Mount, error) { - return []Mount{}, nil -} - -func (n *noopResourceStorage) storeContainerMounts(sandboxID, containerID string, mounts []Mount) error { - return nil -} - -func (n *noopResourceStorage) fetchContainerDevices(sandboxID, containerID string) ([]ContainerDevice, error) { - return []ContainerDevice{}, nil -} - -func (n *noopResourceStorage) storeContainerDevices(sandboxID, containerID string, devices []ContainerDevice) error { - return nil -} diff --git a/virtcontainers/noop_resource_storage_test.go b/virtcontainers/noop_resource_storage_test.go deleted file mode 100644 index 1e4a696bf6..0000000000 --- a/virtcontainers/noop_resource_storage_test.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// SPDX-License-Identifier: Apache-2.0 -// - -package virtcontainers - -import ( - "testing" - - "github.com/kata-containers/runtime/virtcontainers/device/api" - "github.com/stretchr/testify/assert" -) - -func TestNoopCreateAllResources(t *testing.T) { - n := &noopResourceStorage{} - - err := n.createAllResources(nil) - assert.Nil(t, err) -} - -func TestNoopContainerURI(t *testing.T) { - n := &noopResourceStorage{} - - _, _, err := n.containerURI("", "", 0) - assert.Nil(t, err) -} - -func TestNoopSandboxURI(t *testing.T) { - n := &noopResourceStorage{} - - _, _, err := n.sandboxURI("", 0) - assert.Nil(t, err) -} - -func 
TestNoopStoreSandboxResource(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeSandboxResource("", 0, nil) - assert.Nil(t, err) -} - -func TestNoopDeleteSandboxResources(t *testing.T) { - n := &noopResourceStorage{} - - err := n.deleteSandboxResources("", []sandboxResource{0}) - assert.Nil(t, err) -} - -func TestNoopFetchSandboxConfig(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchSandboxConfig("") - assert.Nil(t, err) -} - -func TestNoopFetchSandboxState(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchSandboxState("") - assert.Nil(t, err) -} - -func TestNoopFetchSandboxNetwork(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchSandboxNetwork("") - assert.Nil(t, err) -} - -func TestNoopStoreSandboxNetwork(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeSandboxNetwork("", NetworkNamespace{}) - assert.Nil(t, err) -} - -func TestNoopFetchSandboxDevices(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchSandboxDevices("") - assert.Nil(t, err) -} - -func TestNoopStoreSandboxDevices(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeSandboxDevices("", []api.Device{}) - assert.Nil(t, err) -} - -func TestNoopFetchHypervisorState(t *testing.T) { - n := &noopResourceStorage{} - - err := n.fetchHypervisorState("", nil) - assert.Nil(t, err) -} - -func TestNoopStoreHypervisorState(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeHypervisorState("", nil) - assert.Nil(t, err) -} - -func TestNoopFetchAgentState(t *testing.T) { - n := &noopResourceStorage{} - - err := n.fetchAgentState("", nil) - assert.Nil(t, err) -} - -func TestNoopStoreAgentState(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeAgentState("", nil) - assert.Nil(t, err) -} - -func TestNoopStoreContainerResource(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeContainerResource("", "", 0, nil) - assert.Nil(t, err) -} - -func TestNoopDeleteContainerResources(t 
*testing.T) { - n := &noopResourceStorage{} - - err := n.deleteContainerResources("", "", []sandboxResource{0}) - assert.Nil(t, err) -} - -func TestNoopFetchContainerConfig(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchContainerConfig("", "") - assert.Nil(t, err) -} - -func TestNoopFetchContainerState(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchContainerState("", "") - assert.Nil(t, err) -} - -func TestNoopFetchContainerProcess(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchContainerProcess("", "") - assert.Nil(t, err) -} - -func TestNoopStoreContainerProcess(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeContainerProcess("", "", Process{}) - assert.Nil(t, err) -} - -func TestNoopFetchContainerMounts(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchContainerMounts("", "") - assert.Nil(t, err) -} - -func TestNoopStoreContainerMounts(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeContainerMounts("", "", []Mount{}) - assert.Nil(t, err) -} - -func TestNoopFetchContainerDevices(t *testing.T) { - n := &noopResourceStorage{} - - _, err := n.fetchContainerDevices("", "") - assert.Nil(t, err) -} - -func TestNoopStoreContainerDevices(t *testing.T) { - n := &noopResourceStorage{} - - err := n.storeContainerDevices("", "", []ContainerDevice{}) - assert.Nil(t, err) -} diff --git a/virtcontainers/resource_storage.go b/virtcontainers/resource_storage.go deleted file mode 100644 index 91cbf27a56..0000000000 --- a/virtcontainers/resource_storage.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// SPDX-License-Identifier: Apache-2.0 -// - -package virtcontainers - -import ( - "context" - "encoding/json" - - "github.com/kata-containers/runtime/virtcontainers/device/api" - "github.com/kata-containers/runtime/virtcontainers/types" -) - -// TypedDevice is used as an intermediate representation for marshalling -// and unmarshalling Device 
implementations. -type TypedDevice struct { - Type string - - // Data is assigned the Device object. - // This being declared as RawMessage prevents it from being marshalled/unmarshalled. - // We do that explicitly depending on Type. - Data json.RawMessage -} - -// resourceStorage is the virtcontainers resources (configuration, state, etc...) -// storage interface. -// The default resource storage implementation is filesystem. -type resourceStorage interface { - // Create all resources for a sandbox - createAllResources(ctx context.Context, sandbox *Sandbox) error - - // Resources URIs functions return both the URI - // for the actual resource and the URI base. - containerURI(sandboxID, containerID string, resource sandboxResource) (string, string, error) - sandboxURI(sandboxID string, resource sandboxResource) (string, string, error) - - // Sandbox resources - storeSandboxResource(sandboxID string, resource sandboxResource, data interface{}) error - deleteSandboxResources(sandboxID string, resources []sandboxResource) error - fetchSandboxConfig(sandboxID string) (SandboxConfig, error) - fetchSandboxState(sandboxID string) (types.State, error) - fetchSandboxNetwork(sandboxID string) (NetworkNamespace, error) - storeSandboxNetwork(sandboxID string, networkNS NetworkNamespace) error - fetchSandboxDevices(sandboxID string) ([]api.Device, error) - storeSandboxDevices(sandboxID string, devices []api.Device) error - - // Hypervisor resources - fetchHypervisorState(sandboxID string, state interface{}) error - storeHypervisorState(sandboxID string, state interface{}) error - - // Agent resources - fetchAgentState(sandboxID string, state interface{}) error - storeAgentState(sandboxID string, state interface{}) error - - // Container resources - storeContainerResource(sandboxID, containerID string, resource sandboxResource, data interface{}) error - deleteContainerResources(sandboxID, containerID string, resources []sandboxResource) error - fetchContainerConfig(sandboxID, 
containerID string) (ContainerConfig, error) - fetchContainerState(sandboxID, containerID string) (types.State, error) - fetchContainerProcess(sandboxID, containerID string) (Process, error) - storeContainerProcess(sandboxID, containerID string, process Process) error - fetchContainerMounts(sandboxID, containerID string) ([]Mount, error) - storeContainerMounts(sandboxID, containerID string, mounts []Mount) error - fetchContainerDevices(sandboxID, containerID string) ([]ContainerDevice, error) - storeContainerDevices(sandboxID, containerID string, devices []ContainerDevice) error - createSandboxTempFile(sandboxID string) (string, error) -} From 7b0376f3d3fd86b578162e186e2476d9549a7a49 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Fri, 11 Jan 2019 10:46:27 +0100 Subject: [PATCH 12/13] virtcontainers: Fix container.go cyclomatic complexity With the Stores conversion, the newContainer() cyclomatic complexity went over 15. We fix that by extracting the block devices creation routine out of newContainer. Signed-off-by: Samuel Ortiz --- virtcontainers/container.go | 70 +++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 31 deletions(-) diff --git a/virtcontainers/container.go b/virtcontainers/container.go index 07c65594c6..7952dfbd61 100644 --- a/virtcontainers/container.go +++ b/virtcontainers/container.go @@ -584,6 +584,42 @@ func filterDevices(sandbox *Sandbox, c *Container, devices []ContainerDevice) (r return } +func (c *Container) createBlockDevices() error { + // iterate all mounts and create block device if it's block based. 
+ for i, m := range c.mounts { + if len(m.BlockDeviceID) > 0 || m.Type != "bind" { + // Non-empty m.BlockDeviceID indicates there's already one device + // associated with the mount,so no need to create a new device for it + // and we only create block device for bind mount + continue + } + + var stat unix.Stat_t + if err := unix.Stat(m.Source, &stat); err != nil { + return fmt.Errorf("stat %q failed: %v", m.Source, err) + } + + // Check if mount is a block device file. If it is, the block device will be attached to the host + // instead of passing this as a shared mount. + if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK { + b, err := c.sandbox.devManager.NewDevice(config.DeviceInfo{ + HostPath: m.Source, + ContainerPath: m.Destination, + DevType: "b", + Major: int64(unix.Major(stat.Rdev)), + Minor: int64(unix.Minor(stat.Rdev)), + }) + if err != nil { + return fmt.Errorf("device manager failed to create new device for %q: %v", m.Source, err) + } + + c.mounts[i].BlockDeviceID = b.DeviceID() + } + } + + return nil +} + // newContainer creates a Container structure from a sandbox and a container configuration. func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, error) { span, _ := sandbox.trace("newContainer") @@ -630,37 +666,9 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err // restore mounts from disk c.mounts = mounts } else { - // for newly created container: - // iterate all mounts and create block device if it's block based. 
- for i, m := range c.mounts { - if len(m.BlockDeviceID) > 0 || m.Type != "bind" { - // Non-empty m.BlockDeviceID indicates there's already one device - // associated with the mount,so no need to create a new device for it - // and we only create block device for bind mount - continue - } - - var stat unix.Stat_t - if err := unix.Stat(m.Source, &stat); err != nil { - return nil, fmt.Errorf("stat %q failed: %v", m.Source, err) - } - - // Check if mount is a block device file. If it is, the block device will be attached to the host - // instead of passing this as a shared mount. - if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK { - b, err := c.sandbox.devManager.NewDevice(config.DeviceInfo{ - HostPath: m.Source, - ContainerPath: m.Destination, - DevType: "b", - Major: int64(unix.Major(stat.Rdev)), - Minor: int64(unix.Minor(stat.Rdev)), - }) - if err != nil { - return nil, fmt.Errorf("device manager failed to create new device for %q: %v", m.Source, err) - } - - c.mounts[i].BlockDeviceID = b.DeviceID() - } + // Create block devices for newly created container + if err := c.createBlockDevices(); err != nil { + return nil, err } } From bb99e4152b7217c1fbc8657b7aac2c3e10601056 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Wed, 6 Feb 2019 14:00:01 +0100 Subject: [PATCH 13/13] virtcontainers: Fix Store related vm factory leak We are creating Store directories but never removing them. Calling into a VM factory created vm Stop() will now clean the VM Store artifacts up. 
Signed-off-by: Samuel Ortiz --- virtcontainers/factory/cache/cache_test.go | 5 ++- virtcontainers/factory/direct/direct_test.go | 5 ++- virtcontainers/factory/factory_test.go | 40 +++++++++++++++---- .../factory/template/template_test.go | 25 +++++++++--- virtcontainers/vm.go | 25 ++++++++---- 5 files changed, 77 insertions(+), 23 deletions(-) diff --git a/virtcontainers/factory/cache/cache_test.go b/virtcontainers/factory/cache/cache_test.go index 4139f6930c..2189360d8e 100644 --- a/virtcontainers/factory/cache/cache_test.go +++ b/virtcontainers/factory/cache/cache_test.go @@ -40,7 +40,10 @@ func TestTemplateFactory(t *testing.T) { assert.Equal(f.Config(), vmConfig) // GetBaseVM - _, err := f.GetBaseVM(ctx, vmConfig) + vm, err := f.GetBaseVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) // CloseFactory diff --git a/virtcontainers/factory/direct/direct_test.go b/virtcontainers/factory/direct/direct_test.go index 20a2548747..cca51cea97 100644 --- a/virtcontainers/factory/direct/direct_test.go +++ b/virtcontainers/factory/direct/direct_test.go @@ -39,7 +39,10 @@ func TestTemplateFactory(t *testing.T) { assert.Equal(f.Config(), vmConfig) // GetBaseVM - _, err := f.GetBaseVM(ctx, vmConfig) + vm, err := f.GetBaseVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) // CloseFactory diff --git a/virtcontainers/factory/factory_test.go b/virtcontainers/factory/factory_test.go index 6893c4c5f1..deaa02c20e 100644 --- a/virtcontainers/factory/factory_test.go +++ b/virtcontainers/factory/factory_test.go @@ -190,7 +190,10 @@ func TestFactoryGetVM(t *testing.T) { f, err := NewFactory(ctx, Config{VMConfig: vmConfig}, false) assert.Nil(err) - _, err = f.GetVM(ctx, vmConfig) + vm, err := f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) f.CloseFactory(ctx) @@ -199,7 +202,10 @@ func TestFactoryGetVM(t *testing.T) { f, err = NewFactory(ctx, Config{Template: true, VMConfig: vmConfig}, false) assert.Nil(err) - _, err = 
f.GetVM(ctx, vmConfig) + vm, err = f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) f.CloseFactory(ctx) @@ -211,7 +217,10 @@ func TestFactoryGetVM(t *testing.T) { _, err = NewFactory(ctx, Config{Template: true, VMConfig: vmConfig}, true) assert.Error(err) - _, err = f.GetVM(ctx, vmConfig) + vm, err = f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) f.CloseFactory(ctx) @@ -220,7 +229,10 @@ func TestFactoryGetVM(t *testing.T) { f, err = NewFactory(ctx, Config{Cache: 2, VMConfig: vmConfig}, false) assert.Nil(err) - _, err = f.GetVM(ctx, vmConfig) + vm, err = f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) f.CloseFactory(ctx) @@ -229,22 +241,34 @@ func TestFactoryGetVM(t *testing.T) { f, err = NewFactory(ctx, Config{Template: true, Cache: 2, VMConfig: vmConfig}, false) assert.Nil(err) - _, err = f.GetVM(ctx, vmConfig) + vm, err = f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) // CPU hotplug vmConfig.HypervisorConfig.NumVCPUs++ - _, err = f.GetVM(ctx, vmConfig) + vm, err = f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) // Memory hotplug vmConfig.HypervisorConfig.MemorySize += 128 - _, err = f.GetVM(ctx, vmConfig) + vm, err = f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) // checkConfig fall back vmConfig.HypervisorConfig.Mlock = true - _, err = f.GetVM(ctx, vmConfig) + vm, err = f.GetVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) f.CloseFactory(ctx) diff --git a/virtcontainers/factory/template/template_test.go b/virtcontainers/factory/template/template_test.go index e6ddab557f..d08a2ab69e 100644 --- a/virtcontainers/factory/template/template_test.go +++ b/virtcontainers/factory/template/template_test.go @@ -46,7 +46,10 @@ func TestTemplateFactory(t *testing.T) { assert.Equal(f.Config(), vmConfig) // GetBaseVM - _, err = f.GetBaseVM(ctx, vmConfig) + vm, err := 
f.GetBaseVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() assert.Nil(err) // Fetch @@ -74,19 +77,31 @@ func TestTemplateFactory(t *testing.T) { assert.Error(err) templateProxyType = vc.NoopProxyType - _, err = tt.GetBaseVM(ctx, vmConfig) + vm, err = tt.GetBaseVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() + assert.Nil(err) + + vm, err = f.GetBaseVM(ctx, vmConfig) assert.Nil(err) - _, err = f.GetBaseVM(ctx, vmConfig) + err = vm.Stop() assert.Nil(err) err = tt.createTemplateVM(ctx) assert.Nil(err) - _, err = tt.GetBaseVM(ctx, vmConfig) + vm, err = tt.GetBaseVM(ctx, vmConfig) + assert.Nil(err) + + err = vm.Stop() + assert.Nil(err) + + vm, err = f.GetBaseVM(ctx, vmConfig) assert.Nil(err) - _, err = f.GetBaseVM(ctx, vmConfig) + err = vm.Stop() assert.Nil(err) // CloseFactory diff --git a/virtcontainers/vm.go b/virtcontainers/vm.go index b7af5138fb..e04f521727 100644 --- a/virtcontainers/vm.go +++ b/virtcontainers/vm.go @@ -31,6 +31,8 @@ type VM struct { memory uint32 cpuDelta uint32 + + store *store.VCStore } // VMConfig is a collection of all info that a new blackbox VM needs. 
@@ -113,12 +115,6 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { virtLog.WithField("vm", id).WithField("config", config).Info("create new vm") - defer func() { - if err != nil { - virtLog.WithField("vm", id).WithError(err).Error("failed to create new vm") - } - }() - vcStore, err := store.NewVCStore(ctx, store.SandboxConfigurationRoot(id), store.SandboxRuntimeRoot(id)) @@ -126,6 +122,14 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { return nil, err } + defer func() { + if err != nil { + virtLog.WithField("vm", id).WithError(err).Error("failed to create new vm") + virtLog.WithField("vm", id).Errorf("Deleting store for %s", id) + vcStore.Delete() + } + }() + if err = hypervisor.createSandbox(ctx, id, &config.HypervisorConfig, vcStore); err != nil { return nil, err } @@ -184,6 +188,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { proxyURL: url, cpu: config.HypervisorConfig.NumVCPUs, memory: config.HypervisorConfig.MemorySize, + store: vcStore, }, nil } @@ -235,9 +240,13 @@ func (v *VM) Disconnect() error { // Stop stops a VM process. func (v *VM) Stop() error { - v.logger().Info("kill vm") + v.logger().Info("stop vm") + + if err := v.hypervisor.stopSandbox(); err != nil { + return err + } - return v.hypervisor.stopSandbox() + return v.store.Delete() } // AddCPUs adds num of CPUs to the VM.