Add createSnapshot mode
- There is a problem related to the gogo-protobuf compiler/library,
  which for now forces us to downgrade some dependencies.
- After the snapshot is created and the uVM is resumed, the vsock
  connections (used to provide the created task's stdio) appear to be
  crippled; see the probe sketch below, after the commit metadata.
  Hopefully the uVM will otherwise be fine when loaded from a
  snapshot...

Signed-off-by: Christos Katsakioris <[email protected]>
ckatsak committed Nov 1, 2022
1 parent e9dd15a commit 955ba23
Showing 1 changed file with 206 additions and 2 deletions.
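
The second bullet above reports that, after the uVM is resumed, the task's stdio (carried between host and guest over vsock) appears broken. One way to probe this from the client side is to exec a short command inside the already-running task and see whether its output still reaches the host. The helper below is only a sketch built on the stock containerd Go client API (Container.Spec, Task.Exec); it is not part of this commit, and the exec ID and echo command are arbitrary placeholders.

// stdio_probe.go — illustrative sketch, not part of commit 955ba23.
package main

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
)

// probeStdio execs a short command inside an already-running task, so that
// after ResumeVM one can observe whether the task's stdio still reaches the
// host. The exec ID and the echo command are arbitrary.
func probeStdio(ctx context.Context, container containerd.Container, task containerd.Task) error {
	spec, err := container.Spec(ctx)
	if err != nil {
		return err
	}
	pspec := spec.Process
	pspec.Args = []string{"/bin/echo", "stdio still works"}

	proc, err := task.Exec(ctx, "stdio-probe", pspec, cio.NewCreator(cio.WithStdio))
	if err != nil {
		return err
	}
	defer proc.Delete(ctx)

	esCh, err := proc.Wait(ctx) // set up the wait before starting the process
	if err != nil {
		return err
	}
	if err := proc.Start(ctx); err != nil {
		return err
	}
	_, _, err = (<-esCh).Result() // blocks until the echo exits
	return err
}

Calling something like this right after ResumeVM returns would show whether only the original task's stdio streams are affected, or whether new exec streams over vsock are broken as well.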
208 changes: 206 additions & 2 deletions main.go
@@ -4,6 +4,9 @@ import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"syscall"
	"time"

	"github.com/sirupsen/logrus"

@@ -27,6 +30,10 @@ const (
	imageName = "docker.io/library/debian:bullseye"

	vmID = "testclient01-01"

	defaultSnapshotDirname = "/tmp/snap-testclient01"
	defaultSnapshotStateFileExt = "state"
	defaultSnapshotMemoryFileExt = "memory"
)

var (
@@ -100,6 +107,9 @@ func NewClient(containerdAddress, containerdTTRPCAddress string) (ret *Client, e
//
// Also see:
// - https://github.com/estesp/examplectr/blob/master/examplectr.go
// - https://github.com/containerd/containerd/blob/release/1.6/docs/getting-started.md
// - firecracker-containerd examples (taskworkflow and remote-snapshotter)
// - vHive
func (c *Client) getImage(ctx context.Context, imageName string) (image containerd.Image, err error) {
	log := log.WithField("func", "(*Client).getImage()")

@@ -211,6 +221,7 @@ func vanilla() {
		containerd.WithNewSnapshot(vmID+"-snap", image),
		containerd.WithNewSpec(
			oci.WithImageConfig(image),
			//oci.WithTTY,
			firecrackeroci.WithVMID(vmID),
			firecrackeroci.WithVMNetwork,
		),
@@ -231,7 +242,7 @@
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Create a new task from the container we just defined, to run inside the uVM
	log.Infof("Creating the new task...")
	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio))
	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio /*, cio.WithTerminal */))
	if err != nil {
		log.WithError(err).Errorf("failed to create new task")
		return
@@ -274,7 +285,200 @@ func vanilla() {
}

func createSnapshot() {
	panic("TODO(ckatsak): UNIMLPEMENTED")
	var err error

	log := log.WithField("func", "createSnapshot()")
	ctx := namespaces.WithNamespace(context.Background(), defaultNamespace)

	c, err := NewClient(defaultContainerdAddress, defaultContainerdTTRPCAddress)
	if err != nil {
		log.WithError(err).Errorf("failed to create client")
		return
	}

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Version info for containerd.Client
	if v, err := c.cc.Version(ctx); err != nil {
		log.WithError(err).Errorf("failed to get containerd's version information")
		return
	} else {
		log.Infof("(*containerd.Client).Version() returned %#v", v)
	}
	log.Infof("(*containerd.Client).Runtime() returned '%s'", c.cc.Runtime())
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Create new uVM
	log.Infof("Creating new uVM...")
	createReq := &proto.CreateVMRequest{
		VMID: vmID,
		TimeoutSeconds: 20, // XXX(ckatsak): Optional
		NetworkInterfaces: []*proto.FirecrackerNetworkInterface{{
			StaticConfig: &proto.StaticNetworkConfiguration{
				MacAddress: "AA:FC:00:00:05:01",
				HostDevName: "ckatsak.tap.01",
				IPConfig: &proto.IPConfiguration{
					PrimaryAddr: "10.0.1.2/24",
					GatewayAddr: "10.0.1.1",
					Nameservers: []string{"1.1.1.1", "1.0.0.1"},
				},
			},
		}},
		ContainerCount: 1,
		ExitAfterAllTasksDeleted: true,
	}
	resp, err := c.fcc.CreateVM(ctx, createReq)
	if err != nil {
		log.WithError(err).Errorf("error creating new VM")
		return
	}
	defer func() {
		log.Infof("Stopping uVM...")
		if _, err := c.fcc.StopVM(ctx, &proto.StopVMRequest{VMID: vmID}); err != nil {
			log.WithError(err).Warnf("failed to StopVM() on deferred call")
		}
	}()
	log.Debugf("fcClient.CreateVM() responded: %#v", resp)
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Fetch the container image to be deployed inside the uVM
	log.Infof("Fetching the container image...")
	image, err := c.getImage(ctx, imageName)
	if err != nil {
		log.WithError(err).Errorf("failed to getImage()")
		return
	}
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Create the new container to run inside the uVM
	log.Infof("Creating the new container...")
	container, err := c.cc.NewContainer(
		ctx,
		vmID,
		containerd.WithSnapshotter(snapshotter),
		containerd.WithNewSnapshot(vmID+"-snap", image),
		containerd.WithNewSpec(
			oci.WithImageConfig(image),
			firecrackeroci.WithVMID(vmID),
			firecrackeroci.WithVMNetwork,
		),
		containerd.WithRuntime("aws.firecracker", nil),
	)
	if err != nil {
		log.WithError(err).Errorf("failed to create NewContainer()")
		return
	}
	defer func() {
		log.Infof("Deleting container '%s'...", container.ID())
		if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
			log.WithError(err).Warnf("failed to delete container '%s' with snapshot-cleanup on deferred call", container.ID())
		}
	}()
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Create a new task from the container we just defined, to run inside the uVM
	log.Infof("Creating the new task...")
	var task containerd.Task
	task, err = container.NewTask(ctx, cio.NewCreator(cio.WithStdio))
	if err != nil {
		log.WithError(err).Errorf("failed to create new task")
		return
	}
	defer func() {
		log.Infof("Deleting task...")
		if exitStatus, err := task.Delete(ctx); err != nil {
			log.WithError(err).Warnf("failed to delete task on deferred call")
		} else {
			log.Infof("Deleted task's exit status on deferred call: %#v", exitStatus)
		}
	}()
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Asynchronously wait for the task to complete ... for now (?)
	log.Infof("Creating the new task's exit channel...")
	var esCh <-chan containerd.ExitStatus
	esCh, err = task.Wait(ctx) // esCh: (<-chan containerd.ExitStatus)
	if err != nil {
		log.WithError(err).Errorf("failed to (*containerd.Task).Wait()")
		return
	}
	go func() {
		log := log.WithField("func", "goroutine:task.Wait()")
		log.Infof("Asynchronously waiting for the task to complete...")
		es := <-esCh
		log.Infof("Task completed! Exit status: %#v", es)
	}()
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Start the task
	log.Infof("Starting the new task...")
	if err = task.Start(ctx); err != nil {
		log.WithError(err).Errorf("failed to start the new Task")
		return
	}
	defer func() {
		// Only SIGKILL if we exit with an error?
		//if err != nil {
		log.Infof("Sending a SIGKILL to the task...")
		if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
			log.WithError(err).Warnf("failed to send a SIGKILL to the task on deferred call")
		}
		//}
	}()
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	if err = os.MkdirAll(defaultSnapshotDirname, 0775); err != nil {
		log.WithError(err).Errorf("failed to mkdir('%s') for snapshots", defaultSnapshotDirname)
		return
	}
	log.Infof("Sleeping for 10 seconds for you to play...")
	time.Sleep(10 * time.Second)

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Pause the uVM
	log.Infof("Pausing the uVM...")
	if _, err = c.fcc.PauseVM(ctx, &proto.PauseVMRequest{VMID: vmID}); err != nil {
		log.WithError(err).Errorf("failed to pause the uVM")
		return
	}
	defer func() {
		// Only resume if we exit with an error?
		//if err != nil {
		log.Infof("Resuming the paused uVM...")
		if _, err := c.fcc.ResumeVM(ctx, &proto.ResumeVMRequest{VMID: vmID}); err != nil {
			log.WithError(err).Warnf("failed to resume the uVM on deferred call")
		} else {
			log.Infof("uVM has been resumed successfully!")

			log.Infof("Sleeping for 10sec for you to play...")
			time.Sleep(10 * time.Second)
		}
		//}
	}()
	log.Infof("uVM has been paused successfully!")
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	///////////////////////////////////////////////////////////////////////////////////////////////////////////////
	// Create a new uVM snapshot from the paused uVM
	log.Infof("Creating a new uVM snapshot...")
	snapReq := &proto.CreateVMSnapshotRequest{
		VMID: vmID,
		SnapshotPath: filepath.Join(defaultSnapshotDirname, fmt.Sprintf("%s.%s", vmID, defaultSnapshotStateFileExt)),
		MemFilePath: filepath.Join(defaultSnapshotDirname, fmt.Sprintf("%s.%s", vmID, defaultSnapshotMemoryFileExt)),
	}
	if _, err = c.fcc.CreateVMSnapshot(ctx, snapReq); err != nil {
		log.WithError(err).Errorf("failed to create uVM snapshot")
		return
	}
	log.Infof("uVM snapshots have been created successfully!")
	///////////////////////////////////////////////////////////////////////////////////////////////////////////////

	// Return, allowing the deferred calls to attempt to resume the uVM and cleanup the task and the container
}

func main() {

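Two of the deferred calls added above carry commented-out guards ("Only SIGKILL if we exit with an error?", "Only resume if we exit with an error?"). The single err declared at the top of createSnapshot() and assigned with '=' throughout is what would make such guards work: a deferred closure reads whatever value err holds at the moment the function returns. A minimal, self-contained sketch of that pattern (the function and step names are illustrative, not taken from this commit):

// Illustrative sketch of the cleanup-only-on-error pattern; not part of commit 955ba23.
package main

import (
	"errors"
	"fmt"
)

func step(fail bool) error {
	if fail {
		return errors.New("step failed")
	}
	return nil
}

func doWork(failSecondStep bool) {
	var err error // shared by the whole function body and by the deferred closure

	defer func() {
		if err != nil {
			// Error path only: undo what was set up (e.g. SIGKILL the task,
			// resume the paused uVM) before bailing out.
			fmt.Println("error path, cleaning up:", err)
			return
		}
		fmt.Println("success path, leaving things as they are")
	}()

	if err = step(false); err != nil { // '=' (not ':='), so the closure sees it
		return
	}
	if err = step(failSecondStep); err != nil {
		return
	}
}

func main() {
	doWork(false) // prints the success-path message
	doWork(true)  // prints the error-path message
}

Uncommenting the guards in createSnapshot() would skip the SIGKILL and the ResumeVM on the success path, while the unguarded deferred calls (task and container deletion, StopVM) would still run.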
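For reference, a successful run of createSnapshot() should leave two files under /tmp/snap-testclient01, named after the vmID plus the state and memory extensions defined in the const block above. A tiny standalone check, with the paths spelled out from those constants:

// Illustrative sketch; checks for the snapshot artifacts that createSnapshot()
// is expected to leave behind. Not part of commit 955ba23.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/tmp/snap-testclient01" // defaultSnapshotDirname in main.go
	for _, name := range []string{"testclient01-01.state", "testclient01-01.memory"} {
		info, err := os.Stat(filepath.Join(dir, name))
		if err != nil {
			fmt.Println("missing:", name, "-", err)
			continue
		}
		fmt.Printf("%s: %d bytes\n", name, info.Size())
	}
}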