Review OCI tests (#269)
* Review OCI tests

* use yitsushi/devmapper-containerd-action@v1
* extend the ImageService test
  * invalid image pull
  * double-check that a volume is mounted
  * test for a volume that is not mounted
  * kernel volume mount test
* Add unit test on ImageService
* Fix integration test (sudo)
* Add a nice and friendly comment to the ImageService integration test
* run tests on push, so we can get a better codecov on merge

fixes #15

Co-authored-by: Claudia <[email protected]>
yitsushi and Callisto13 authored Nov 22, 2021
1 parent 78c369b commit cd8062f
Showing 15 changed files with 2,068 additions and 96 deletions.
5 changes: 2 additions & 3 deletions .github/workflows/test.yml
@@ -1,14 +1,13 @@
name: run tests

-on: [pull_request, workflow_dispatch]
+on: [pull_request, workflow_dispatch, push]

jobs:
  test:
    name: test
    runs-on: ubuntu-latest
    steps:
-      - name: Set up containerd
-        uses: crazy-max/ghaction-setup-containerd@v1
+      - uses: yitsushi/[email protected]
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
2 changes: 1 addition & 1 deletion Makefile
@@ -98,7 +98,7 @@ test: ## Run unit tests

.PHONY: test-with-cov
test-with-cov: ## Run unit tests with coverage
-	go test -v -race -timeout 2m -p 1 -covermode=atomic -coverprofile=coverage.txt -exec sudo ./...
+	go test -v -race -timeout 2m -p 1 -covermode=atomic -coverprofile=coverage.txt -exec "sudo --preserve-env=CTR_SOCK_PATH" ./...

.PHONY: test-e2e
test-e2e: compile-e2e ## Run e2e tests locally
60 changes: 60 additions & 0 deletions infrastructure/containerd/client_interface.go
@@ -0,0 +1,60 @@
package containerd

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/api/services/tasks/v1"
	versionservice "github.com/containerd/containerd/api/services/version/v1"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/services/introspection"
	"github.com/containerd/containerd/snapshots"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"
)

// Client is the subset of the containerd client's methods used by flintlock,
// defined as an interface so tests can supply their own implementation.
type Client interface {
	Close() error
	Conn() *grpc.ClientConn
	Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
	ContainerService() containers.Store
	ContentStore() content.Store
	DiffService() containerd.DiffService
	EventService() containerd.EventService
	Fetch(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (images.Image, error)
	GetImage(ctx context.Context, ref string) (containerd.Image, error)
	GetLabel(ctx context.Context, label string) (string, error)
	GetSnapshotterSupportedPlatforms(ctx context.Context, snapshotterName string) (platforms.MatchComparer, error)
	HealthService() grpc_health_v1.HealthClient
	ImageService() images.Store
	IntrospectionService() introspection.Service
	IsServing(ctx context.Context) (bool, error)
	LeasesService() leases.Manager
	ListImages(ctx context.Context, filters ...string) ([]containerd.Image, error)
	LoadContainer(ctx context.Context, id string) (containerd.Container, error)
	NamespaceService() namespaces.Store
	NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
	Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
	Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...containerd.RemoteOpt) error
	Reconnect() error
	Restore(
		ctx context.Context,
		id string,
		checkpoint containerd.Image,
		opts ...containerd.RestoreOpts,
	) (containerd.Container, error)
	Runtime() string
	Server(ctx context.Context) (containerd.ServerInfo, error)
	SnapshotService(snapshotterName string) snapshots.Snapshotter
	Subscribe(ctx context.Context, filters ...string) (<-chan *events.Envelope, <-chan error)
	TaskService() tasks.TasksClient
	Version(ctx context.Context) (containerd.Version, error)
	VersionService() versionservice.VersionClient
}
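
The Client interface mirrors the method set of *containerd.Client, so the real client satisfies it unchanged while tests can inject a stand-in (this is what enables the "Add unit test on ImageService" item in the commit message). Below is a minimal sketch of such a stand-in; the fakeClient type and its canned image store are illustrative assumptions, not part of this commit:

package containerd_test

import (
	"github.com/containerd/containerd/images"

	"github.com/weaveworks/flintlock/infrastructure/containerd"
)

// fakeClient embeds the Client interface so only the methods a test actually
// exercises need to be overridden; calling any other method panics, which
// makes unexpected usage easy to spot.
type fakeClient struct {
	containerd.Client

	imageStore images.Store
}

// ImageService returns a canned image store instead of talking to a real
// containerd daemon.
func (f *fakeClient) ImageService() images.Store {
	return f.imageStore
}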
2 changes: 1 addition & 1 deletion infrastructure/containerd/event_service_test.go
@@ -32,7 +32,7 @@ func TestEventService_Integration(t *testing.T) {
	es := containerd.NewEventServiceWithClient(&containerd.Config{
		SnapshotterKernel: testSnapshotter,
		SnapshotterVolume: testSnapshotter,
-		Namespace:         testContainerdNs,
+		Namespace:         testContainerdNS,
	}, client)

	testEvents := []*events.MicroVMSpecCreated{
4 changes: 2 additions & 2 deletions infrastructure/containerd/image_service.go
@@ -27,15 +27,15 @@ func NewImageService(cfg *Config) (ports.ImageService, error) {
}

// NewImageServiceWithClient will create a new image service based on containerd with the supplied containerd client.
-func NewImageServiceWithClient(cfg *Config, client *containerd.Client) ports.ImageService {
+func NewImageServiceWithClient(cfg *Config, client Client) ports.ImageService {
	return &imageService{
		config: cfg,
		client: client,
	}
}

type imageService struct {
-	client *containerd.Client
+	client Client
	config *Config
}
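
Because the service now depends on the Client interface rather than the concrete *containerd.Client, a unit test can construct it with a fake. A short sketch, reusing the hypothetical fakeClient from the client_interface.go section above (illustrative only, not part of this commit):

// Construct the image service against a fake client; no containerd daemon is
// needed, so svc.Pull / svc.Exists can be unit tested in isolation.
svc := containerd.NewImageServiceWithClient(&containerd.Config{
	SnapshotterKernel: "devmapper",
	SnapshotterVolume: "devmapper",
	Namespace:         "unit_test_ns",
}, &fakeClient{})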

281 changes: 281 additions & 0 deletions infrastructure/containerd/image_service_integration_test.go
@@ -0,0 +1,281 @@
package containerd_test

import (
	"context"
	"fmt"
	"os"
	"testing"

	ctr "github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/snapshots"
	. "github.com/onsi/gomega"

	"github.com/weaveworks/flintlock/core/models"
	"github.com/weaveworks/flintlock/core/ports"
	"github.com/weaveworks/flintlock/infrastructure/containerd"
)

const (
	testImageVolume    = "docker.io/library/alpine:3.14.1"
	testImageKernel    = "docker.io/linuxkit/kernel:5.4.129"
	testSnapshotter    = "devmapper"
	testOwnerNamespace = "int_ns"
	testOwnerUsageID   = "vol1"
	testOwnerName      = "imageservice-get-test"
	testContainerdNS   = "flintlock_test_ctr"
)

// What does this test do?
//
// == Preface
//
// Prepare an ImageService and a namespace context. All images and snapshots
// will live under the same namespace.
//
// We do some prudent cleanup steps in case a previous test run was aborted.
// Because it's an integration test against an external service, if we don't
// clean up after ourselves, the next test run will most likely fail. In
// theory, releasing the leases should be enough, but that depends heavily on
// containerd's internal behaviour, so it's easier to delete everything we
// intended to create.
//
// == Chapter I: Pull and mount
//
// Using the ImageService, we try to Pull an image.
//
// We don't have a separate Mount function (yet), so we use PullAndMount; with
// this we can be sure the ImageService can pull an image from a repository
// and mount it.
//
// To avoid a false positive, we also try to PullAndMount an image we know is
// definitely not there, and we expect it to fail. If it does not fail,
// something fishy is swimming under the hood.
//
// After PullAndMount, the real image should be mounted, but the fake one
// should not be. At the end we should have only one image available locally.
//
// == Chapter II: Snapshots
//
// As we have already pulled and mounted an image, we expect one snapshot to
// exist, with a name constructed from the VM namespace, the VM name, and the
// volume usage type.
// At the same time, we expect one lease to exist with a similar naming
// structure, minus the volume usage type: there is one lease covering all of
// an owner's resources in containerd.
//
// == Chapter III: Kernel
//
// We do the same checks for the kernel image. Kernel and volume images work
// the same way; the real reason we test it here is that the usage type is
// different, and we should be able to pull and mount both.
func TestImageService_Integration(t *testing.T) {
	if !runContainerDTests() {
		t.Skip("skipping containerd image service integration test")
	}

	RegisterTestingT(t)

	//
	// Preface
	//

	client, ctx := testCreateClient(t)
	namespaceCtx := namespaces.WithNamespace(ctx, testContainerdNS)

	imageSvc := containerd.NewImageServiceWithClient(&containerd.Config{
		SnapshotterKernel: testSnapshotter,
		SnapshotterVolume: testSnapshotter,
		Namespace:         testContainerdNS,
	}, client)

	inputGetAndMount := &ports.ImageMountSpec{
		ImageName:    getTestVolumeImage(),
		Owner:        fmt.Sprintf("%s/%s", testOwnerNamespace, testOwnerName),
		OwnerUsageID: testOwnerUsageID,
		Use:          models.ImageUseVolume,
	}
	inputGet := &ports.ImageSpec{
		ImageName: inputGetAndMount.ImageName,
		Owner:     inputGetAndMount.Owner,
	}
	expectedSnapshotName := fmt.Sprintf(
		"flintlock/%s/%s/%s",
		testOwnerNamespace,
		testOwnerName,
		testOwnerUsageID,
	)
	expectedLeaseName := fmt.Sprintf("flintlock/%s/%s", testOwnerNamespace, testOwnerName)

	defer func() {
		// Make sure it's deleted.
		if err := client.ImageService().Delete(namespaceCtx, getTestKernelImage()); err != nil {
			t.Logf("Unable to delete the %s volume: %s\n", getTestKernelImage(), err.Error())
		}

		if err := client.ImageService().Delete(namespaceCtx, getTestVolumeImage()); err != nil {
			t.Logf("Unable to delete the %s volume: %s\n", getTestVolumeImage(), err.Error())
		}

		if err := client.SnapshotService(testSnapshotter).Remove(namespaceCtx, expectedSnapshotName); err != nil {
			t.Logf("Unable to delete the %s snapshot: %s\n", expectedSnapshotName, err.Error())
		}

		leases, err := client.LeasesService().List(namespaceCtx)
		if err != nil {
			t.Logf("Unable to list leases: %s\n", err.Error())
		}

		for _, lease := range leases {
			if err := client.LeasesService().Delete(namespaceCtx, lease); err != nil {
				t.Logf("Unable to delete %s lease: %s\n", lease.ID, err.Error())
			}
		}
	}()

	//
	// Chapter I: Pull and mount.
	//

	err := imageSvc.Pull(ctx, inputGet)
	Expect(err).NotTo(HaveOccurred())

	mounts, err := imageSvc.PullAndMount(ctx, inputGetAndMount)
	Expect(err).NotTo(HaveOccurred())
	Expect(mounts).NotTo(BeNil())
	Expect(len(mounts)).To(Equal(1))

	fakePull := &ports.ImageMountSpec{
		ImageName:    "random/whynot/definitely-not-there",
		Owner:        fmt.Sprintf("%s/%s", testOwnerNamespace, testOwnerName),
		OwnerUsageID: testOwnerUsageID,
		Use:          models.ImageUseVolume,
	}
	mounts, err = imageSvc.PullAndMount(ctx, fakePull)
	Expect(err).To(HaveOccurred())
	Expect(mounts).To(BeNil())

	testImageMounted(ctx, imageSvc, testImageMountOptions{
		ImageName: getTestVolumeImage(),
		Owner:     inputGetAndMount.Owner,
		Use:       models.ImageUseVolume,
		Expected:  true,
	})

	testImageMounted(ctx, imageSvc, testImageMountOptions{
		ImageName: "definitely-not-there",
		Owner:     inputGetAndMount.Owner,
		Use:       models.ImageUseVolume,
		Expected:  false,
	})

	img, err := client.ImageService().List(namespaceCtx)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(img)).To(Equal(1))
	Expect(img[0].Name).To(Equal(getTestVolumeImage()))

	//
	// Chapter II: Snapshots
	//

	snapshotExists := false
	err = client.SnapshotService(testSnapshotter).Walk(namespaceCtx, func(walkCtx context.Context, info snapshots.Info) error {
		if info.Name == expectedSnapshotName {
			snapshotExists = true
		}

		return nil
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(snapshotExists).To(BeTrue(), "expect snapshot with name %s to exist", expectedSnapshotName)

	leases, err := client.LeasesService().List(namespaceCtx)
	Expect(len(leases)).To(Equal(1))
	Expect(leases[0].ID).To(Equal(expectedLeaseName), "expect lease with name %s to exist", expectedLeaseName)

	//
	// Chapter III: Kernel
	//

	inputGet.ImageName = getTestKernelImage()

	err = imageSvc.Pull(ctx, inputGet)
	Expect(err).NotTo(HaveOccurred())

	exists, err := imageSvc.Exists(ctx, &ports.ImageSpec{
		ImageName: getTestVolumeImage(),
		Owner:     testOwnerUsageID,
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(exists).To(BeTrue())

	mounts, err = imageSvc.PullAndMount(ctx, &ports.ImageMountSpec{
		ImageName:    getTestKernelImage(),
		Owner:        testOwnerUsageID,
		Use:          models.ImageUseKernel,
		OwnerUsageID: testOwnerUsageID,
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(mounts).NotTo(BeNil())
	Expect(len(mounts)).To(Equal(1))
}

func testCreateClient(t *testing.T) (*ctr.Client, context.Context) {
	addr := os.Getenv("CTR_SOCK_PATH")
	client, err := ctr.New(addr)
	Expect(err).NotTo(HaveOccurred())

	ctx := context.Background()

	serving, err := client.IsServing(ctx)
	Expect(err).NotTo(HaveOccurred())
	Expect(serving).To(BeTrue())

	return client, ctx
}

func runContainerDTests() bool {
	testCtr := os.Getenv("CTR_SOCK_PATH")
	return testCtr != ""
}

// getTestVolumeImage returns the test volume image name from the
// CTR_TEST_VOL_IMG environment variable, falling back to the default if it is
// not defined.
func getTestVolumeImage() string {
	envImage := os.Getenv("CTR_TEST_VOL_IMG")
	if envImage != "" {
		return envImage
	}

	return testImageVolume
}

// getTestKernelImage returns the test kernel image name from the
// CTR_TEST_KERNEL_IMG environment variable, falling back to the default if it
// is not defined.
func getTestKernelImage() string {
	envImage := os.Getenv("CTR_TEST_KERNEL_IMG")
	if envImage != "" {
		return envImage
	}

	return testImageKernel
}

type testImageMountOptions struct {
	ImageName string
	Owner     string
	Use       models.ImageUse
	Expected  bool
}

// testImageMounted checks whether an image is mounted, exercising the
// IsMounted method.
func testImageMounted(ctx context.Context, imageSvc ports.ImageService, opts testImageMountOptions) {
	mounted, err := imageSvc.IsMounted(ctx, &ports.ImageMountSpec{
		ImageName:    opts.ImageName,
		Owner:        opts.Owner,
		Use:          opts.Use,
		OwnerUsageID: testOwnerUsageID,
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(mounted).To(Equal(opts.Expected))
}