From 6a75f5d765c58b5e49cd061ca32ddd23d9ec0d99 Mon Sep 17 00:00:00 2001
From: Sam Berning <113054166+sam-berning@users.noreply.github.com>
Date: Fri, 6 Jan 2023 18:09:11 -0600
Subject: [PATCH] feat: saves containerd user data to a persistent disk (#133)

this allows users to retain downloaded images, containers, etc. across new
installations of finch

Signed-off-by: Sam Berning

Issue #, if available: https://github.com/runfinch/finch/issues/77

*Description of changes:*
- Adds a new package for managing the user data persistent disk
- Checks disk configuration & creates disk if necessary on `finch vm init`
- Attaches disk whenever the VM starts

*Testing done:*
- Unit testing on `disk` and `cmd/finch`
- Manual testing

*Open questions:*
- Should we also check the configuration on `finch vm start`?

- [x] I've reviewed the guidance in CONTRIBUTING.md

#### License Acceptance

By submitting this pull request, I confirm that my contribution is made under
the terms of the Apache 2.0 license.

Signed-off-by: Sam Berning
---
 cmd/finch/main.go                       |   3 +
 cmd/finch/virtual_machine.go            |   8 +-
 cmd/finch/virtual_machine_init.go       |  25 +-
 cmd/finch/virtual_machine_init_test.go  |  25 +-
 cmd/finch/virtual_machine_start.go      |  28 ++-
 cmd/finch/virtual_machine_start_test.go |  28 ++-
 cmd/finch/virtual_machine_test.go       |   2 +-
 e2e/additional_disk_test.go             |  44 ++++
 e2e/config_test.go                      |   4 +-
 e2e/e2e_test.go                         |   1 +
 finch.yaml                              |  22 +-
 go.mod                                  |   5 +
 go.sum                                  |   4 +
 pkg/disk/disk.go                        | 160 +++++++++++++
 pkg/disk/disk_test.go                   | 151 +++++++++++++
 pkg/mocks/pkg_disk_disk.go              | 289 ++++++++++++++++++++++++
 pkg/path/finch.go                       |   6 +
 pkg/path/finch_test.go                  |   7 +
 18 files changed, 779 insertions(+), 33 deletions(-)
 create mode 100644 e2e/additional_disk_test.go
 create mode 100644 pkg/disk/disk.go
 create mode 100644 pkg/disk/disk_test.go
 create mode 100644 pkg/mocks/pkg_disk_disk.go

diff --git a/cmd/finch/main.go b/cmd/finch/main.go
index f7c7f3ad8..9ed00c6be 100644
--- a/cmd/finch/main.go
+++ b/cmd/finch/main.go
@@ -7,6 +7,8 @@ package main
 import (
 	"fmt"
 
+	"github.com/runfinch/finch/pkg/disk"
+
 	"github.com/runfinch/finch/pkg/command"
 	"github.com/runfinch/finch/pkg/config"
 	"github.com/runfinch/finch/pkg/dependency"
@@ -108,6 +110,7 @@ func virtualMachineCommands(
 		config.NewNerdctlApplier(fssh.NewDialer(), fs, fp.LimaSSHPrivateKeyPath(), system.NewStdLib()),
 		fp,
 		fs,
+		disk.NewUserDataDiskManager(lcc, &afero.OsFs{}, fp, system.NewStdLib().Env("HOME")),
 	)
 }

diff --git a/cmd/finch/virtual_machine.go b/cmd/finch/virtual_machine.go
index fb8d4ebd3..b4c28bbae 100644
--- a/cmd/finch/virtual_machine.go
+++ b/cmd/finch/virtual_machine.go
@@ -7,6 +7,8 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/runfinch/finch/pkg/disk"
+
 	"github.com/runfinch/finch/pkg/command"
 	"github.com/runfinch/finch/pkg/config"
 	"github.com/runfinch/finch/pkg/dependency"
@@ -30,6 +32,7 @@ func newVirtualMachineCommand(
 	nca config.NerdctlConfigApplier,
 	fp path.Finch,
 	fs afero.Fs,
+	diskManager disk.UserDataDiskManager,
 ) *cobra.Command {
 	virtualMachineCommand := &cobra.Command{
 		Use:   virtualMachineRootCmd,
@@ -37,10 +40,11 @@ func newVirtualMachineCommand(
 	}
 
 	virtualMachineCommand.AddCommand(
-		newStartVMCommand(limaCmdCreator, logger, optionalDepGroups, lca, nca, fs, fp.LimaSSHPrivateKeyPath()),
+		newStartVMCommand(limaCmdCreator, logger, optionalDepGroups, lca, nca, fs, fp.LimaSSHPrivateKeyPath(), diskManager),
 		newStopVMCommand(limaCmdCreator, logger),
 		newRemoveVMCommand(limaCmdCreator, logger),
-		newInitVMCommand(limaCmdCreator, logger, optionalDepGroups, lca, nca, fp.BaseYamlFilePath(), fs,
fp.LimaSSHPrivateKeyPath()), + newInitVMCommand(limaCmdCreator, logger, optionalDepGroups, lca, nca, fp.BaseYamlFilePath(), fs, + fp.LimaSSHPrivateKeyPath(), diskManager), ) return virtualMachineCommand diff --git a/cmd/finch/virtual_machine_init.go b/cmd/finch/virtual_machine_init.go index 941bbc85d..c69ed6460 100644 --- a/cmd/finch/virtual_machine_init.go +++ b/cmd/finch/virtual_machine_init.go @@ -6,6 +6,8 @@ package main import ( "fmt" + "github.com/runfinch/finch/pkg/disk" + "github.com/runfinch/finch/pkg/command" "github.com/runfinch/finch/pkg/config" "github.com/runfinch/finch/pkg/dependency" @@ -25,11 +27,12 @@ func newInitVMCommand( baseYamlFilePath string, fs afero.Fs, privateKeyPath string, + diskManager disk.UserDataDiskManager, ) *cobra.Command { initVMCommand := &cobra.Command{ Use: "init", Short: "Initialize the virtual machine", - RunE: newInitVMAction(lcc, logger, optionalDepGroups, lca, baseYamlFilePath).runAdapter, + RunE: newInitVMAction(lcc, logger, optionalDepGroups, lca, baseYamlFilePath, diskManager).runAdapter, PostRunE: newPostVMStartInitAction(logger, lcc, fs, privateKeyPath, nca).runAdapter, } @@ -42,6 +45,7 @@ type initVMAction struct { logger flog.Logger optionalDepGroups []*dependency.Group limaConfigApplier config.LimaConfigApplier + diskManager disk.UserDataDiskManager } func newInitVMAction( @@ -50,9 +54,15 @@ func newInitVMAction( optionalDepGroups []*dependency.Group, lca config.LimaConfigApplier, baseYamlFilePath string, + diskManager disk.UserDataDiskManager, ) *initVMAction { return &initVMAction{ - creator: creator, logger: logger, optionalDepGroups: optionalDepGroups, limaConfigApplier: lca, baseYamlFilePath: baseYamlFilePath, + creator: creator, + logger: logger, + optionalDepGroups: optionalDepGroups, + limaConfigApplier: lca, + baseYamlFilePath: baseYamlFilePath, + diskManager: diskManager, } } @@ -61,7 +71,7 @@ func (iva *initVMAction) runAdapter(cmd *cobra.Command, args []string) error { } func (iva *initVMAction) run() error { - err := iva.assertVMIsNonexistent(iva.creator, iva.logger) + err := iva.assertVMIsNonexistent() if err != nil { return err } @@ -76,6 +86,11 @@ func (iva *initVMAction) run() error { return err } + err = iva.diskManager.EnsureUserDataDisk() + if err != nil { + return err + } + instanceName := fmt.Sprintf("--name=%v", limaInstanceName) limaCmd := iva.creator.CreateWithoutStdio("start", instanceName, iva.baseYamlFilePath, "--tty=false") iva.logger.Info("Initializing and starting Finch virtual machine...") @@ -88,8 +103,8 @@ func (iva *initVMAction) run() error { return nil } -func (iva *initVMAction) assertVMIsNonexistent(creator command.LimaCmdCreator, logger flog.Logger) error { - status, err := lima.GetVMStatus(creator, logger, limaInstanceName) +func (iva *initVMAction) assertVMIsNonexistent() error { + status, err := lima.GetVMStatus(iva.creator, iva.logger, limaInstanceName) if err != nil { return err } diff --git a/cmd/finch/virtual_machine_init_test.go b/cmd/finch/virtual_machine_init_test.go index 2e3b55641..d69983747 100644 --- a/cmd/finch/virtual_machine_init_test.go +++ b/cmd/finch/virtual_machine_init_test.go @@ -21,7 +21,7 @@ const mockBaseYamlFilePath = "/os/os.yaml" func TestNewInitVMCommand(t *testing.T) { t.Parallel() - cmd := newInitVMCommand(nil, nil, nil, nil, nil, "", nil, "") + cmd := newInitVMCommand(nil, nil, nil, nil, nil, "", nil, "", nil) assert.Equal(t, cmd.Name(), "init") } @@ -37,6 +37,7 @@ func TestInitVMAction_runAdapter(t *testing.T) { *mocks.LimaCmdCreator, *mocks.Logger, 
*mocks.LimaConfigApplier, + *mocks.MockUserDataDiskManager, *gomock.Controller, ) }{ @@ -61,6 +62,7 @@ func TestInitVMAction_runAdapter(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -70,6 +72,7 @@ func TestInitVMAction_runAdapter(t *testing.T) { command := mocks.NewCommand(ctrl) lca.EXPECT().Apply().Return(nil) + dm.EXPECT().EnsureUserDataDisk().Return(nil) lcc.EXPECT().CreateWithoutStdio("start", fmt.Sprintf("--name=%s", limaInstanceName), mockBaseYamlFilePath, "--tty=false").Return(command) command.EXPECT().CombinedOutput() @@ -89,11 +92,12 @@ func TestInitVMAction_runAdapter(t *testing.T) { logger := mocks.NewLogger(ctrl) lcc := mocks.NewLimaCmdCreator(ctrl) lca := mocks.NewLimaConfigApplier(ctrl) + dm := mocks.NewMockUserDataDiskManager(ctrl) groups := tc.groups(ctrl) - tc.mockSvc(lcc, logger, lca, ctrl) + tc.mockSvc(lcc, logger, lca, dm, ctrl) - assert.NoError(t, newInitVMAction(lcc, logger, groups, lca, mockBaseYamlFilePath).runAdapter(tc.command, tc.args)) + assert.NoError(t, newInitVMAction(lcc, logger, groups, lca, mockBaseYamlFilePath, dm).runAdapter(tc.command, tc.args)) }) } } @@ -109,6 +113,7 @@ func TestInitVMAction_run(t *testing.T) { *mocks.LimaCmdCreator, *mocks.Logger, *mocks.LimaConfigApplier, + *mocks.MockUserDataDiskManager, *gomock.Controller, ) }{ @@ -122,6 +127,7 @@ func TestInitVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -130,6 +136,7 @@ func TestInitVMAction_run(t *testing.T) { logger.EXPECT().Debugf("Status of virtual machine: %s", "") lca.EXPECT().Apply().Return(nil) + dm.EXPECT().EnsureUserDataDisk().Return(nil) command := mocks.NewCommand(ctrl) lcc.EXPECT().CreateWithoutStdio("start", fmt.Sprintf("--name=%s", limaInstanceName), @@ -150,6 +157,7 @@ func TestInitVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -170,6 +178,7 @@ func TestInitVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -188,6 +197,7 @@ func TestInitVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -206,6 +216,7 @@ func TestInitVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -234,6 +245,7 @@ func TestInitVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -257,6 +269,7 @@ func TestInitVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -265,6 +278,7 @@ func TestInitVMAction_run(t *testing.T) { 
logger.EXPECT().Debugf("Status of virtual machine: %s", "") lca.EXPECT().Apply().Return(nil) + dm.EXPECT().EnsureUserDataDisk().Return(nil) logs := []byte("stdout + stderr") command := mocks.NewCommand(ctrl) @@ -287,11 +301,12 @@ func TestInitVMAction_run(t *testing.T) { logger := mocks.NewLogger(ctrl) lcc := mocks.NewLimaCmdCreator(ctrl) lca := mocks.NewLimaConfigApplier(ctrl) + dm := mocks.NewMockUserDataDiskManager(ctrl) groups := tc.groups(ctrl) - tc.mockSvc(lcc, logger, lca, ctrl) + tc.mockSvc(lcc, logger, lca, dm, ctrl) - err := newInitVMAction(lcc, logger, groups, lca, mockBaseYamlFilePath).run() + err := newInitVMAction(lcc, logger, groups, lca, mockBaseYamlFilePath, dm).run() assert.Equal(t, err, tc.wantErr) }) } diff --git a/cmd/finch/virtual_machine_start.go b/cmd/finch/virtual_machine_start.go index de86d156f..0a56d9f03 100644 --- a/cmd/finch/virtual_machine_start.go +++ b/cmd/finch/virtual_machine_start.go @@ -6,6 +6,8 @@ package main import ( "fmt" + "github.com/runfinch/finch/pkg/disk" + "github.com/runfinch/finch/pkg/command" "github.com/runfinch/finch/pkg/config" "github.com/runfinch/finch/pkg/dependency" @@ -24,20 +26,22 @@ func newStartVMCommand( nca config.NerdctlConfigApplier, fs afero.Fs, privateKeyPath string, + dm disk.UserDataDiskManager, ) *cobra.Command { return &cobra.Command{ Use: "start", Short: "Start the virtual machine", - RunE: newStartVMAction(lcc, logger, optionalDepGroups, lca).runAdapter, + RunE: newStartVMAction(lcc, logger, optionalDepGroups, lca, dm).runAdapter, PostRunE: newPostVMStartInitAction(logger, lcc, fs, privateKeyPath, nca).runAdapter, } } type startVMAction struct { - creator command.LimaCmdCreator - logger flog.Logger - optionalDepGroups []*dependency.Group - limaConfigApplier config.LimaConfigApplier + creator command.LimaCmdCreator + logger flog.Logger + optionalDepGroups []*dependency.Group + limaConfigApplier config.LimaConfigApplier + userDataDiskManager disk.UserDataDiskManager } func newStartVMAction( @@ -45,8 +49,15 @@ func newStartVMAction( logger flog.Logger, optionalDepGroups []*dependency.Group, lca config.LimaConfigApplier, + dm disk.UserDataDiskManager, ) *startVMAction { - return &startVMAction{creator: creator, logger: logger, optionalDepGroups: optionalDepGroups, limaConfigApplier: lca} + return &startVMAction{ + creator: creator, + logger: logger, + optionalDepGroups: optionalDepGroups, + limaConfigApplier: lca, + userDataDiskManager: dm, + } } func (sva *startVMAction) runAdapter(cmd *cobra.Command, args []string) error { @@ -68,6 +79,11 @@ func (sva *startVMAction) run() error { return err } + err = sva.userDataDiskManager.EnsureUserDataDisk() + if err != nil { + return err + } + limaCmd := sva.creator.CreateWithoutStdio("start", limaInstanceName) sva.logger.Info("Starting existing Finch virtual machine...") logs, err := limaCmd.CombinedOutput() diff --git a/cmd/finch/virtual_machine_start_test.go b/cmd/finch/virtual_machine_start_test.go index d8b155801..de7d648b4 100644 --- a/cmd/finch/virtual_machine_start_test.go +++ b/cmd/finch/virtual_machine_start_test.go @@ -19,7 +19,7 @@ import ( func TestNewStartVMCommand(t *testing.T) { t.Parallel() - cmd := newStartVMCommand(nil, nil, nil, nil, nil, nil, "") + cmd := newStartVMCommand(nil, nil, nil, nil, nil, nil, "", nil) assert.Equal(t, cmd.Name(), "start") } @@ -36,6 +36,7 @@ func TestStartVMAction_runAdapter(t *testing.T) { *mocks.LimaCmdCreator, *mocks.Logger, *mocks.LimaConfigApplier, + *mocks.MockUserDataDiskManager, *gomock.Controller, ) }{ @@ -61,6 +62,7 @@ func 
TestStartVMAction_runAdapter(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -70,6 +72,8 @@ func TestStartVMAction_runAdapter(t *testing.T) { lca.EXPECT().Apply().Return(nil) + dm.EXPECT().EnsureUserDataDisk().Return(nil) + command := mocks.NewCommand(ctrl) command.EXPECT().CombinedOutput() lcc.EXPECT().CreateWithoutStdio("start", limaInstanceName).Return(command) @@ -89,11 +93,12 @@ func TestStartVMAction_runAdapter(t *testing.T) { logger := mocks.NewLogger(ctrl) lcc := mocks.NewLimaCmdCreator(ctrl) lca := mocks.NewLimaConfigApplier(ctrl) + dm := mocks.NewMockUserDataDiskManager(ctrl) groups := tc.groups(ctrl) - tc.mockSvc(lcc, logger, lca, ctrl) + tc.mockSvc(lcc, logger, lca, dm, ctrl) - err := newStartVMAction(lcc, logger, groups, lca).runAdapter(tc.command, tc.args) + err := newStartVMAction(lcc, logger, groups, lca, dm).runAdapter(tc.command, tc.args) assert.Equal(t, tc.wantErr, err) }) } @@ -110,6 +115,7 @@ func TestStartVMAction_run(t *testing.T) { *mocks.LimaCmdCreator, *mocks.Logger, *mocks.LimaConfigApplier, + *mocks.MockUserDataDiskManager, *gomock.Controller, ) }{ @@ -131,6 +137,7 @@ func TestStartVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -140,6 +147,8 @@ func TestStartVMAction_run(t *testing.T) { lca.EXPECT().Apply().Return(nil) + dm.EXPECT().EnsureUserDataDisk().Return(nil) + command := mocks.NewCommand(ctrl) command.EXPECT().CombinedOutput() lcc.EXPECT().CreateWithoutStdio("start", limaInstanceName).Return(command) @@ -158,6 +167,7 @@ func TestStartVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -177,6 +187,7 @@ func TestStartVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -195,6 +206,7 @@ func TestStartVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -213,6 +225,7 @@ func TestStartVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -241,6 +254,7 @@ func TestStartVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -271,6 +285,7 @@ func TestStartVMAction_run(t *testing.T) { lcc *mocks.LimaCmdCreator, logger *mocks.Logger, lca *mocks.LimaConfigApplier, + dm *mocks.MockUserDataDiskManager, ctrl *gomock.Controller, ) { getVMStatusC := mocks.NewCommand(ctrl) @@ -280,6 +295,8 @@ func TestStartVMAction_run(t *testing.T) { lca.EXPECT().Apply().Return(nil) + dm.EXPECT().EnsureUserDataDisk().Return(nil) + logs := []byte("stdout + stderr") command := mocks.NewCommand(ctrl) command.EXPECT().CombinedOutput().Return(logs, errors.New("start command 
error")) @@ -300,11 +317,12 @@ func TestStartVMAction_run(t *testing.T) { logger := mocks.NewLogger(ctrl) lcc := mocks.NewLimaCmdCreator(ctrl) lca := mocks.NewLimaConfigApplier(ctrl) + dm := mocks.NewMockUserDataDiskManager(ctrl) groups := tc.groups(ctrl) - tc.mockSvc(lcc, logger, lca, ctrl) + tc.mockSvc(lcc, logger, lca, dm, ctrl) - err := newStartVMAction(lcc, logger, groups, lca).run() + err := newStartVMAction(lcc, logger, groups, lca, dm).run() assert.Equal(t, err, tc.wantErr) }) } diff --git a/cmd/finch/virtual_machine_test.go b/cmd/finch/virtual_machine_test.go index 0a8d9f38d..3571a7233 100644 --- a/cmd/finch/virtual_machine_test.go +++ b/cmd/finch/virtual_machine_test.go @@ -17,7 +17,7 @@ import ( func TestVirtualMachineCommand(t *testing.T) { t.Parallel() - cmd := newVirtualMachineCommand(nil, nil, nil, nil, nil, "", nil) + cmd := newVirtualMachineCommand(nil, nil, nil, nil, nil, "", nil, nil) assert.Equal(t, cmd.Use, virtualMachineRootCmd) // check the number of subcommand for vm diff --git a/e2e/additional_disk_test.go b/e2e/additional_disk_test.go new file mode 100644 index 000000000..29bec7ff3 --- /dev/null +++ b/e2e/additional_disk_test.go @@ -0,0 +1,44 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/runfinch/common-tests/command" + "github.com/runfinch/common-tests/option" +) + +const ( + savedImage = "public.ecr.aws/docker/library/alpine:latest" + containerName = "userDataTest" +) + +var testAdditionalDisk = func(o *option.Option) { + ginkgo.Describe("Additional disk", ginkgo.Serial, func() { + ginkgo.It("Retains container user data after the VM is deleted", func() { + command.Run(o, "pull", savedImage) + oldImagesOutput := command.StdoutStr(o, "images", "--format", "{{.Name}}") + gomega.Expect(oldImagesOutput).Should(gomega.ContainSubstring(savedImage)) + + command.Run(o, "run", "--name", containerName, savedImage) + oldPsOutput := command.StdoutStr(o, "ps", "--all", "--format", "{{.Names}}") + gomega.Expect(oldPsOutput).Should(gomega.ContainSubstring(containerName)) + + command.New(o, virtualMachineRootCmd, "stop").WithoutCheckingExitCode().WithTimeoutInSeconds(60).Run() + command.Run(o, virtualMachineRootCmd, "remove") + + command.New(o, virtualMachineRootCmd, "init").WithTimeoutInSeconds(240).Run() + + newImagesOutput := command.StdoutStr(o, "images", "--format", "{{.Name}}") + gomega.Expect(newImagesOutput).Should(gomega.Equal(oldImagesOutput)) + + newPsOutput := command.StdoutStr(o, "ps", "--all", "--format", "{{.Names}}") + gomega.Expect(newPsOutput).Should(gomega.Equal(oldPsOutput)) + + command.Run(o, "rm", containerName) + command.Run(o, "rmi", savedImage) + }) + }) +} diff --git a/e2e/config_test.go b/e2e/config_test.go index bbaad6913..9b33c08ec 100644 --- a/e2e/config_test.go +++ b/e2e/config_test.go @@ -41,8 +41,8 @@ func writeFile(filePath string, buf []byte) { func updateAndApplyConfig(o *option.Option, configBytes []byte) *gexec.Session { writeFile(finchConfigFilePath, configBytes) - command.New(o, virtualMachineRootCmd, "stop").WithoutCheckingExitCode().WithTimeoutInSeconds(20).Run() - return command.New(o, virtualMachineRootCmd, "start").WithoutCheckingExitCode().WithTimeoutInSeconds(60).Run() + command.New(o, virtualMachineRootCmd, "stop").WithoutCheckingExitCode().WithTimeoutInSeconds(60).Run() + return command.New(o, virtualMachineRootCmd, 
"start").WithoutCheckingExitCode().WithTimeoutInSeconds(120).Run() } // testConfig updates the finch config file and ensures that its settings are applied properly. diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index 60e5a3e46..8cad9053c 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -99,6 +99,7 @@ func TestE2e(t *testing.T) { // When running tests in serial sequence and using the local registry, testVirtualMachine needs to run after generic tests finished // since it will remove the VM instance thus removing the local registry. testVirtualMachine(o) + testAdditionalDisk(o) testConfig(o, *installed) testVersion(o) }) diff --git a/finch.yaml b/finch.yaml index 784220765..14a337bb8 100644 --- a/finch.yaml +++ b/finch.yaml @@ -81,6 +81,15 @@ mounts: # 🟢 Builtin default: "reverse-sshfs" mountType: reverse-sshfs +# Lima disks to attach to the instance. The disks will be accessible from inside the +# instance, labeled by name. (e.g. if the disk is named "data", it will be labeled +# "lima-data" inside the instance). The disk will be mounted inside the instance at +# `/mnt/lima-${VOLUME}`. +# 🟢 Builtin default: null +# For Finch, this value should always be the same as the diskName in pkg/disk/disk.go +additionalDisks: +- "finch" + ssh: # A localhost port of the host. Forwarded to port 22 of the guest. # 🟢 Builtin default: 0 (automatically assigned to a free port) @@ -136,13 +145,12 @@ provision: systemctl reset-failed NetworkManager-wait-online.service systemctl mask NetworkManager-wait-online.service # # `user` is executed without the root privilege -# - mode: user -# script: | -# #!/bin/bash -# set -eux -o pipefail -# cat < ~/.vimrc -# set number -# EOF +- mode: user + script: | + #!/bin/bash + sudo chown $USER /mnt/lima-finch + sudo mount --bind /mnt/lima-finch ~/.local/share/containerd + systemctl --user restart containerd.service # Probe scripts to check readiness. 
# 🟢 Builtin default: null diff --git a/go.mod b/go.mod index e9c778171..651759e05 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,9 @@ require ( require ( github.com/Microsoft/go-winio v0.5.2 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect + github.com/containerd/containerd v1.6.14 // indirect github.com/containers/gvisor-tap-vsock v0.4.1-0.20220920072955-5b1aff8ba743 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/insomniacslk/dhcp v0.0.0-20220504074936-1ca156eafb9f // indirect @@ -34,6 +36,9 @@ require ( github.com/u-root/uio v0.0.0-20210528114334-82958018845c // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959 // indirect + google.golang.org/grpc v1.48.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect gvisor.dev/gvisor v0.0.0-20220908032458-edc830a43ba6 // indirect inet.af/tcpproxy v0.0.0-20220326234310-be3ee21c9fa0 // indirect ) diff --git a/go.sum b/go.sum index 9a5d7748a..e38f9f820 100644 --- a/go.sum +++ b/go.sum @@ -121,6 +121,8 @@ github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.12/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.6.14 h1:W+d0AJKVG3ioTZZyQwcw1Y3vvo6ZDYzAcjDcY4tkgGI= +github.com/containerd/containerd v1.6.14/go.mod h1:U2NnBPIhzJDm59xF7xB2MMHnKtggpZ+phKg8o2TKj2c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -1088,6 +1090,7 @@ google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljW google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959 h1:hw4Y42zL1VyVKxPgRHHh191fpVBGV8sNVmcow5Z8VXY= google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1123,6 +1126,7 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod 
h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
diff --git a/pkg/disk/disk.go b/pkg/disk/disk.go
new file mode 100644
index 000000000..1af5ee06d
--- /dev/null
+++ b/pkg/disk/disk.go
@@ -0,0 +1,160 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package disk manages the persistent disk used to save containerd user data
+package disk
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/fs"
+	"path"
+
+	limaStore "github.com/lima-vm/lima/pkg/store"
+	"github.com/spf13/afero"
+
+	"github.com/runfinch/finch/pkg/command"
+	fpath "github.com/runfinch/finch/pkg/path"
+)
+
+const (
+	// diskName must always be consistent with the value under additionalDisks in finch.yaml.
+	diskName = "finch"
+	diskSize = "50G"
+)
+
+// UserDataDiskManager is used to check the user data disk configuration and create it if needed.
+type UserDataDiskManager interface {
+	EnsureUserDataDisk() error
+}
+
+// fs functions required for setting up the user data disk.
+type diskFS interface {
+	afero.Fs
+	afero.Linker
+	afero.LinkReader
+}
+
+type userDataDiskManager struct {
+	lcc     command.LimaCmdCreator
+	fs      diskFS
+	finch   fpath.Finch
+	homeDir string
+}
+
+// NewUserDataDiskManager is a constructor for UserDataDiskManager.
+func NewUserDataDiskManager(
+	lcc command.LimaCmdCreator,
+	fs diskFS,
+	finch fpath.Finch,
+	homeDir string,
+) UserDataDiskManager {
+	return &userDataDiskManager{
+		lcc:     lcc,
+		fs:      fs,
+		finch:   finch,
+		homeDir: homeDir,
+	}
+}
+
+// EnsureUserDataDisk checks the current disk configuration and fixes it if needed.
+func (m *userDataDiskManager) EnsureUserDataDisk() error {
+	if m.limaDiskExists() {
+		limaPath := fmt.Sprintf("%s/_disks/%s/datadisk", m.finch.LimaHomePath(), diskName)
+		loc, err := m.fs.ReadlinkIfPossible(limaPath)
+		if err != nil {
+			return err
+		}
+		// if the file is not a symlink, loc will be an empty string
+		// both os.Readlink() and UserDataDiskPath return absolute paths, so they will be equal if equivalent
+		if loc != m.finch.UserDataDiskPath(m.homeDir) {
+			err := m.attachPersistentDiskToLimaDisk()
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		if err := m.createLimaDisk(); err != nil {
+			return err
+		}
+		err := m.attachPersistentDiskToLimaDisk()
+		if err != nil {
+			return err
+		}
+	}
+
+	if m.limaDiskIsLocked() {
+		err := m.unlockLimaDisk()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *userDataDiskManager) persistentDiskExists() bool {
+	_, err := m.fs.Stat(m.finch.UserDataDiskPath(m.homeDir))
+	return err == nil
+}
+
+func (m *userDataDiskManager) limaDiskExists() bool {
+	cmd := m.lcc.CreateWithoutStdio("disk", "ls", diskName, "--json")
+	out, err := cmd.Output()
+	if err != nil {
+		return false
+	}
+	diskListOutput := &limaStore.Disk{}
+	err = json.Unmarshal(out, diskListOutput)
+	if err != nil {
+		return false
+	}
+	return diskListOutput.Name == diskName
+}
+
+func (m *userDataDiskManager) createLimaDisk() error {
+	cmd := m.lcc.CreateWithoutStdio("disk", "create", diskName, "--size", diskSize)
+	return cmd.Run()
+}
+
+func (m *userDataDiskManager) attachPersistentDiskToLimaDisk() error {
+	limaPath := fmt.Sprintf("%s/_disks/%s/datadisk", m.finch.LimaHomePath(), diskName)
+	if !m.persistentDiskExists() {
+		err := m.fs.Rename(limaPath, m.finch.UserDataDiskPath(m.homeDir))
+		if err != nil {
+			return err
+		}
+	}
+
+	// if a datadisk already exists in the lima path, SymlinkIfPossible will no-op.
+	// to ensure that it symlinks properly, we have to delete the disk in the lima path
+	_, err := m.fs.Stat(limaPath)
+	if err != nil {
+		if !errors.Is(err, fs.ErrNotExist) {
+			return err
+		}
+	} else {
+		err = m.fs.Remove(limaPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = m.fs.SymlinkIfPossible(m.finch.UserDataDiskPath(m.homeDir), limaPath)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *userDataDiskManager) limaDiskIsLocked() bool {
+	lockPath := path.Join(m.finch.LimaHomePath(), "_disks", diskName, "in_use_by")
+	_, err := m.fs.Stat(lockPath)
+	return err == nil
+}
+
+func (m *userDataDiskManager) unlockLimaDisk() error {
+	cmd := m.lcc.CreateWithoutStdio("disk", "unlock", diskName)
+	return cmd.Run()
+}
diff --git a/pkg/disk/disk_test.go b/pkg/disk/disk_test.go
new file mode 100644
index 000000000..a7c89b73e
--- /dev/null
+++ b/pkg/disk/disk_test.go
@@ -0,0 +1,151 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package disk
+
+import (
+	"io/fs"
+	"path"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+
+	"github.com/runfinch/finch/pkg/mocks"
+	fpath "github.com/runfinch/finch/pkg/path"
+)
+
+func TestDisk_NewUserDataDiskManager(t *testing.T) {
+	t.Parallel()
+
+	ctrl := gomock.NewController(t)
+	lcc := mocks.NewLimaCmdCreator(ctrl)
+	dfs := mocks.NewMockdiskFS(ctrl)
+	finch := fpath.Finch("mock_finch")
+	homeDir := "mock_home"
+
+	NewUserDataDiskManager(lcc, dfs, finch, homeDir)
+}
+
+func TestUserDataDiskManager_InitializeUserDataDisk(t *testing.T) {
+	t.Parallel()
+
+	finch := fpath.Finch("mock_finch")
+	homeDir := "mock_home"
+
+	limaPath := path.Join(finch.LimaHomePath(), "_disks", diskName, "datadisk")
+	lockPath := path.Join(finch.LimaHomePath(), "_disks", diskName, "in_use_by")
+	mockListArgs := []string{"disk", "ls", diskName, "--json"}
+	mockCreateArgs := []string{"disk", "create", diskName, "--size", diskSize}
+	mockUnlockArgs := []string{"disk", "unlock", diskName}
+
+	//nolint:lll // line cannot be shortened without losing functionality
+	listSuccessOutput := []byte("{\"name\":\"finch\",\"size\":5,\"dir\":\"mock_dir\",\"instance\":\"\",\"instanceDir\":\"\",\"mountPoint\":\"/mnt/lima-finch\"}")
+
+	testCases := []struct {
+		name    string
+		wantErr error
+		mockSvc func(*mocks.LimaCmdCreator, *mocks.MockdiskFS, *mocks.Command)
+	}{
+		{
+			name:    "create and save disk",
+			wantErr: nil,
+			mockSvc: func(lcc *mocks.LimaCmdCreator, dfs *mocks.MockdiskFS, cmd *mocks.Command) {
+				lcc.EXPECT().CreateWithoutStdio(mockListArgs).Return(cmd)
+				cmd.EXPECT().Output().Return([]byte(""), nil)
+
+				lcc.EXPECT().CreateWithoutStdio(mockCreateArgs).Return(cmd)
+				cmd.EXPECT().Run().Return(nil)
+
+				dfs.EXPECT().Stat(finch.UserDataDiskPath(homeDir)).Return(nil, fs.ErrNotExist)
+				dfs.EXPECT().Rename(limaPath, finch.UserDataDiskPath(homeDir)).Return(nil)
+
+				dfs.EXPECT().Stat(limaPath).Return(nil, fs.ErrNotExist)
+				dfs.EXPECT().SymlinkIfPossible(finch.UserDataDiskPath(homeDir), limaPath).Return(nil)
+
+				dfs.EXPECT().Stat(lockPath).Return(nil, fs.ErrNotExist)
+			},
+		},
+		{
+			name:    "disk already exists",
+			wantErr: nil,
+			mockSvc: func(lcc *mocks.LimaCmdCreator, dfs *mocks.MockdiskFS, cmd *mocks.Command) {
+				lcc.EXPECT().CreateWithoutStdio(mockListArgs).Return(cmd)
+				cmd.EXPECT().Output().Return(listSuccessOutput, nil)
+
+				dfs.EXPECT().ReadlinkIfPossible(limaPath).Return(finch.UserDataDiskPath(homeDir), nil)
+
dfs.EXPECT().Stat(lockPath).Return(nil, fs.ErrNotExist) + }, + }, + { + name: "disk exists but has not been saved", + wantErr: nil, + mockSvc: func(lcc *mocks.LimaCmdCreator, dfs *mocks.MockdiskFS, cmd *mocks.Command) { + lcc.EXPECT().CreateWithoutStdio(mockListArgs).Return(cmd) + cmd.EXPECT().Output().Return(listSuccessOutput, nil) + + // not a link + dfs.EXPECT().ReadlinkIfPossible(limaPath).Return("", nil) + + dfs.EXPECT().Stat(finch.UserDataDiskPath(homeDir)).Return(nil, fs.ErrNotExist) + dfs.EXPECT().Rename(limaPath, finch.UserDataDiskPath(homeDir)).Return(nil) + + dfs.EXPECT().Stat(limaPath).Return(nil, fs.ErrNotExist) + dfs.EXPECT().SymlinkIfPossible(finch.UserDataDiskPath(homeDir), limaPath).Return(nil) + + dfs.EXPECT().Stat(lockPath).Return(nil, fs.ErrNotExist) + }, + }, + { + name: "disk does not exist but a persistent disk does", + wantErr: nil, + mockSvc: func(lcc *mocks.LimaCmdCreator, dfs *mocks.MockdiskFS, cmd *mocks.Command) { + lcc.EXPECT().CreateWithoutStdio(mockListArgs).Return(cmd) + cmd.EXPECT().Output().Return([]byte(""), nil) + + lcc.EXPECT().CreateWithoutStdio(mockCreateArgs).Return(cmd) + cmd.EXPECT().Run().Return(nil) + + dfs.EXPECT().Stat(finch.UserDataDiskPath(homeDir)).Return(nil, nil) + + dfs.EXPECT().Stat(limaPath).Return(nil, nil) + dfs.EXPECT().Remove(limaPath).Return(nil) + + dfs.EXPECT().SymlinkIfPossible(finch.UserDataDiskPath(homeDir), limaPath).Return(nil) + + dfs.EXPECT().Stat(lockPath).Return(nil, fs.ErrNotExist) + }, + }, + { + name: "disk already exists but is locked", + wantErr: nil, + mockSvc: func(lcc *mocks.LimaCmdCreator, dfs *mocks.MockdiskFS, cmd *mocks.Command) { + lcc.EXPECT().CreateWithoutStdio(mockListArgs).Return(cmd) + cmd.EXPECT().Output().Return(listSuccessOutput, nil) + + dfs.EXPECT().ReadlinkIfPossible(limaPath).Return(finch.UserDataDiskPath(homeDir), nil) + + dfs.EXPECT().Stat(lockPath).Return(nil, nil) + lcc.EXPECT().CreateWithoutStdio(mockUnlockArgs).Return(cmd) + cmd.EXPECT().Run().Return(nil) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + lcc := mocks.NewLimaCmdCreator(ctrl) + dfs := mocks.NewMockdiskFS(ctrl) + cmd := mocks.NewCommand(ctrl) + tc.mockSvc(lcc, dfs, cmd) + dm := NewUserDataDiskManager(lcc, dfs, finch, homeDir) + err := dm.EnsureUserDataDisk() + assert.Equal(t, tc.wantErr, err) + }) + } +} diff --git a/pkg/mocks/pkg_disk_disk.go b/pkg/mocks/pkg_disk_disk.go new file mode 100644 index 000000000..66cab6001 --- /dev/null +++ b/pkg/mocks/pkg_disk_disk.go @@ -0,0 +1,289 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/disk/disk.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + os "os" + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" + afero "github.com/spf13/afero" +) + +// MockUserDataDiskManager is a mock of UserDataDiskManager interface. +type MockUserDataDiskManager struct { + ctrl *gomock.Controller + recorder *MockUserDataDiskManagerMockRecorder +} + +// MockUserDataDiskManagerMockRecorder is the mock recorder for MockUserDataDiskManager. +type MockUserDataDiskManagerMockRecorder struct { + mock *MockUserDataDiskManager +} + +// NewMockUserDataDiskManager creates a new mock instance. 
+func NewMockUserDataDiskManager(ctrl *gomock.Controller) *MockUserDataDiskManager { + mock := &MockUserDataDiskManager{ctrl: ctrl} + mock.recorder = &MockUserDataDiskManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUserDataDiskManager) EXPECT() *MockUserDataDiskManagerMockRecorder { + return m.recorder +} + +// EnsureUserDataDisk mocks base method. +func (m *MockUserDataDiskManager) EnsureUserDataDisk() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnsureUserDataDisk") + ret0, _ := ret[0].(error) + return ret0 +} + +// EnsureUserDataDisk indicates an expected call of EnsureUserDataDisk. +func (mr *MockUserDataDiskManagerMockRecorder) EnsureUserDataDisk() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureUserDataDisk", reflect.TypeOf((*MockUserDataDiskManager)(nil).EnsureUserDataDisk)) +} + +// MockdiskFS is a mock of diskFS interface. +type MockdiskFS struct { + ctrl *gomock.Controller + recorder *MockdiskFSMockRecorder +} + +// MockdiskFSMockRecorder is the mock recorder for MockdiskFS. +type MockdiskFSMockRecorder struct { + mock *MockdiskFS +} + +// NewMockdiskFS creates a new mock instance. +func NewMockdiskFS(ctrl *gomock.Controller) *MockdiskFS { + mock := &MockdiskFS{ctrl: ctrl} + mock.recorder = &MockdiskFSMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockdiskFS) EXPECT() *MockdiskFSMockRecorder { + return m.recorder +} + +// Chmod mocks base method. +func (m *MockdiskFS) Chmod(name string, mode os.FileMode) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Chmod", name, mode) + ret0, _ := ret[0].(error) + return ret0 +} + +// Chmod indicates an expected call of Chmod. +func (mr *MockdiskFSMockRecorder) Chmod(name, mode interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chmod", reflect.TypeOf((*MockdiskFS)(nil).Chmod), name, mode) +} + +// Chown mocks base method. +func (m *MockdiskFS) Chown(name string, uid, gid int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Chown", name, uid, gid) + ret0, _ := ret[0].(error) + return ret0 +} + +// Chown indicates an expected call of Chown. +func (mr *MockdiskFSMockRecorder) Chown(name, uid, gid interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chown", reflect.TypeOf((*MockdiskFS)(nil).Chown), name, uid, gid) +} + +// Chtimes mocks base method. +func (m *MockdiskFS) Chtimes(name string, atime, mtime time.Time) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Chtimes", name, atime, mtime) + ret0, _ := ret[0].(error) + return ret0 +} + +// Chtimes indicates an expected call of Chtimes. +func (mr *MockdiskFSMockRecorder) Chtimes(name, atime, mtime interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chtimes", reflect.TypeOf((*MockdiskFS)(nil).Chtimes), name, atime, mtime) +} + +// Create mocks base method. +func (m *MockdiskFS) Create(name string) (afero.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", name) + ret0, _ := ret[0].(afero.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Create indicates an expected call of Create. 
+func (mr *MockdiskFSMockRecorder) Create(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockdiskFS)(nil).Create), name) +} + +// Mkdir mocks base method. +func (m *MockdiskFS) Mkdir(name string, perm os.FileMode) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Mkdir", name, perm) + ret0, _ := ret[0].(error) + return ret0 +} + +// Mkdir indicates an expected call of Mkdir. +func (mr *MockdiskFSMockRecorder) Mkdir(name, perm interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mkdir", reflect.TypeOf((*MockdiskFS)(nil).Mkdir), name, perm) +} + +// MkdirAll mocks base method. +func (m *MockdiskFS) MkdirAll(path string, perm os.FileMode) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MkdirAll", path, perm) + ret0, _ := ret[0].(error) + return ret0 +} + +// MkdirAll indicates an expected call of MkdirAll. +func (mr *MockdiskFSMockRecorder) MkdirAll(path, perm interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MkdirAll", reflect.TypeOf((*MockdiskFS)(nil).MkdirAll), path, perm) +} + +// Name mocks base method. +func (m *MockdiskFS) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. +func (mr *MockdiskFSMockRecorder) Name() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockdiskFS)(nil).Name)) +} + +// Open mocks base method. +func (m *MockdiskFS) Open(name string) (afero.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Open", name) + ret0, _ := ret[0].(afero.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Open indicates an expected call of Open. +func (mr *MockdiskFSMockRecorder) Open(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Open", reflect.TypeOf((*MockdiskFS)(nil).Open), name) +} + +// OpenFile mocks base method. +func (m *MockdiskFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OpenFile", name, flag, perm) + ret0, _ := ret[0].(afero.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OpenFile indicates an expected call of OpenFile. +func (mr *MockdiskFSMockRecorder) OpenFile(name, flag, perm interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenFile", reflect.TypeOf((*MockdiskFS)(nil).OpenFile), name, flag, perm) +} + +// ReadlinkIfPossible mocks base method. +func (m *MockdiskFS) ReadlinkIfPossible(name string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadlinkIfPossible", name) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadlinkIfPossible indicates an expected call of ReadlinkIfPossible. +func (mr *MockdiskFSMockRecorder) ReadlinkIfPossible(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadlinkIfPossible", reflect.TypeOf((*MockdiskFS)(nil).ReadlinkIfPossible), name) +} + +// Remove mocks base method. +func (m *MockdiskFS) Remove(name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Remove", name) + ret0, _ := ret[0].(error) + return ret0 +} + +// Remove indicates an expected call of Remove. 
+func (mr *MockdiskFSMockRecorder) Remove(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockdiskFS)(nil).Remove), name) +} + +// RemoveAll mocks base method. +func (m *MockdiskFS) RemoveAll(path string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveAll", path) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveAll indicates an expected call of RemoveAll. +func (mr *MockdiskFSMockRecorder) RemoveAll(path interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAll", reflect.TypeOf((*MockdiskFS)(nil).RemoveAll), path) +} + +// Rename mocks base method. +func (m *MockdiskFS) Rename(oldname, newname string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Rename", oldname, newname) + ret0, _ := ret[0].(error) + return ret0 +} + +// Rename indicates an expected call of Rename. +func (mr *MockdiskFSMockRecorder) Rename(oldname, newname interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rename", reflect.TypeOf((*MockdiskFS)(nil).Rename), oldname, newname) +} + +// Stat mocks base method. +func (m *MockdiskFS) Stat(name string) (os.FileInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stat", name) + ret0, _ := ret[0].(os.FileInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stat indicates an expected call of Stat. +func (mr *MockdiskFSMockRecorder) Stat(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockdiskFS)(nil).Stat), name) +} + +// SymlinkIfPossible mocks base method. +func (m *MockdiskFS) SymlinkIfPossible(oldname, newname string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SymlinkIfPossible", oldname, newname) + ret0, _ := ret[0].(error) + return ret0 +} + +// SymlinkIfPossible indicates an expected call of SymlinkIfPossible. +func (mr *MockdiskFSMockRecorder) SymlinkIfPossible(oldname, newname interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SymlinkIfPossible", reflect.TypeOf((*MockdiskFS)(nil).SymlinkIfPossible), oldname, newname) +} diff --git a/pkg/path/finch.go b/pkg/path/finch.go index e46d5a2fe..b5472ed06 100644 --- a/pkg/path/finch.go +++ b/pkg/path/finch.go @@ -18,6 +18,12 @@ func (Finch) ConfigFilePath(homeDir string) string { return fmt.Sprintf("%s/.finch/finch.yaml", homeDir) } +// UserDataDiskPath returns the path to the permanent storage location of the Finch +// user data disk. +func (Finch) UserDataDiskPath(homeDir string) string { + return fmt.Sprintf("%s/.finch/.datadisk", homeDir) +} + // LimaHomePath returns the path that should be set to LIMA_HOME for Finch. func (w Finch) LimaHomePath() string { return fmt.Sprintf("%s/lima/data", w) diff --git a/pkg/path/finch_test.go b/pkg/path/finch_test.go index f7e514d77..c1cb2d5d4 100644 --- a/pkg/path/finch_test.go +++ b/pkg/path/finch_test.go @@ -23,6 +23,13 @@ func TestFinch_ConfigFilePath(t *testing.T) { assert.Equal(t, res, "homeDir/.finch/finch.yaml") } +func TestFinch_UserDataDiskPath(t *testing.T) { + t.Parallel() + + res := mockFinch.UserDataDiskPath("homeDir") + assert.Equal(t, res, "homeDir/.finch/.datadisk") +} + func TestFinch_LimaHomePath(t *testing.T) { t.Parallel()