From 3a0be350701b47905b2ade3fc72b5d297a0a3cf4 Mon Sep 17 00:00:00 2001 From: giuli007 <39272329+giuli007@users.noreply.github.com> Date: Fri, 23 Sep 2022 23:31:24 +0200 Subject: [PATCH] Delete previous plans on autoplan or atlantis plan (#1633) * Delete previous plans on autoplan or atlantis plan When using non-default workspaces, plans are stored in pr-and-workspace-specific directories. If a PR is subsequently updated it might happen that some of the plans are no longer relevant with regard to the latest changes. This change ensures that plans are always deleted when a generic plan is triggered either by autoplan or by an "atlantis plan" command. NB Plans are not cleaned up when specific projects are planned explicitly with "atlantis plan -p/-d/-w". * Also delete locks along with plans when running autoplan or generic plan command * Fix Plan deletion tests after 0.19.8 rebase * Fix Plan deletion tests after v0.19.9-pre.20220912 rebase * Rename struct field to follow camelCase naming convention Co-authored-by: giuli007 --- runatlantis.io/docs/using-atlantis.md | 5 ++ .../events/events_controller_e2e_test.go | 1 + server/events/command_runner_test.go | 77 ++++++++++++++++++- server/events/plan_command_runner.go | 23 ++++++ server/server.go | 1 + 5 files changed, 105 insertions(+), 2 deletions(-) diff --git a/runatlantis.io/docs/using-atlantis.md b/runatlantis.io/docs/using-atlantis.md index 4695831e81..fe499623b1 100644 --- a/runatlantis.io/docs/using-atlantis.md +++ b/runatlantis.io/docs/using-atlantis.md @@ -45,6 +45,10 @@ atlantis plan -w staging * `-w workspace` Switch to this [Terraform workspace](https://www.terraform.io/docs/state/workspaces.html) before planning. Defaults to `default`. If not using Terraform workspaces you can ignore this. * `--verbose` Append Atlantis log to comment. 
+::: warning NOTE +An `atlantis plan` (without flags), like autoplans, discards all plans previously created with `atlantis plan` `-p`/`-d`/`-w` +::: + ### Additional Terraform flags If you need to run `terraform plan` with additional arguments, like `-target=resource` or `-var 'foo-bar'` or `-var-file myfile.tfvars` @@ -65,6 +69,7 @@ Runs `terraform apply` for the plan that matches the directory/project/workspace ::: tip If no directory/project/workspace is specified, ex. `atlantis apply`, this command will apply **all unapplied plans from this pull request**. +This includes all projects that have been planned manually with `atlantis plan` `-p`/`-d`/`-w` since the last autoplan or `atlantis plan` command. ::: ### Examples diff --git a/server/controllers/events/events_controller_e2e_test.go b/server/controllers/events/events_controller_e2e_test.go index 6d56f52f28..213b983d28 100644 --- a/server/controllers/events/events_controller_e2e_test.go +++ b/server/controllers/events/events_controller_e2e_test.go @@ -1026,6 +1026,7 @@ func setupE2E(t *testing.T, repoDir string) (events_controllers.VCSEventsControl parallelPoolSize, silenceNoProjects, boltdb, + lockingClient, ) e2ePullReqStatusFetcher := vcs.NewPullReqStatusFetcher(e2eVCSClient) diff --git a/server/events/command_runner_test.go b/server/events/command_runner_test.go index 19f46e13de..1df1c8692f 100644 --- a/server/events/command_runner_test.go +++ b/server/events/command_runner_test.go @@ -63,6 +63,7 @@ var policyCheckCommandRunner *events.PolicyCheckCommandRunner var approvePoliciesCommandRunner *events.ApprovePoliciesCommandRunner var planCommandRunner *events.PlanCommandRunner var applyLockChecker *lockingmocks.MockApplyLockChecker +var lockingLocker *lockingmocks.MockLocker var applyCommandRunner *events.ApplyCommandRunner var unlockCommandRunner *events.UnlockCommandRunner var preWorkflowHooksCommandRunner events.PreWorkflowHooksCommandRunner @@ -89,6 +90,7 @@ func setup(t *testing.T) 
*vcsmocks.MockClient { drainer = &events.Drainer{} deleteLockCommand = eventmocks.NewMockDeleteLockCommand() applyLockChecker = lockingmocks.NewMockApplyLockChecker() + lockingLocker = lockingmocks.NewMockLocker() dbUpdater = &events.DBUpdater{ Backend: defaultBoltDB, @@ -133,6 +135,7 @@ func setup(t *testing.T) *vcsmocks.MockClient { parallelPoolSize, SilenceNoProjects, defaultBoltDB, + lockingLocker, ) pullReqStatusFetcher := vcs.NewPullReqStatusFetcher(vcsClient) @@ -529,9 +532,78 @@ func TestRunUnlockCommandFail_VCSComment(t *testing.T) { vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, fixtures.Pull.Num, "Failed to delete PR locks", "unlock") } +func TestRunAutoplanCommand_DeletePlans(t *testing.T) { + setup(t) + tmp, cleanup := TempDir(t) + defer cleanup() + boltDB, err := db.New(tmp) + Ok(t, err) + dbUpdater.Backend = boltDB + applyCommandRunner.Backend = boltDB + autoMerger.GlobalAutomerge = true + defer func() { autoMerger.GlobalAutomerge = false }() + + When(projectCommandBuilder.BuildAutoplanCommands(matchers.AnyPtrToEventsCommandContext())). 
+ ThenReturn([]command.ProjectContext{ + { + CommandName: command.Plan, + }, + { + CommandName: command.Plan, + }, + }, nil) + When(projectCommandRunner.Plan(matchers.AnyModelsProjectCommandContext())).ThenReturn(command.ProjectResult{PlanSuccess: &models.PlanSuccess{}}) + When(workingDir.GetPullDir(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn(tmp, nil) + fixtures.Pull.BaseRepo = fixtures.GithubRepo + ch.RunAutoplanCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.Pull, fixtures.User) + pendingPlanFinder.VerifyWasCalledOnce().DeletePlans(tmp) + lockingLocker.VerifyWasCalledOnce().UnlockByPull(fixtures.Pull.BaseRepo.FullName, fixtures.Pull.Num) +} + +func TestRunGenericPlanCommand_DeletePlans(t *testing.T) { + setup(t) + tmp, cleanup := TempDir(t) + defer cleanup() + boltDB, err := db.New(tmp) + Ok(t, err) + dbUpdater.Backend = boltDB + applyCommandRunner.Backend = boltDB + autoMerger.GlobalAutomerge = true + defer func() { autoMerger.GlobalAutomerge = false }() + + When(projectCommandRunner.Plan(matchers.AnyModelsProjectCommandContext())).ThenReturn(command.ProjectResult{PlanSuccess: &models.PlanSuccess{}}) + When(workingDir.GetPullDir(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn(tmp, nil) + pull := &github.PullRequest{State: github.String("open")} + modelPull := models.PullRequest{BaseRepo: fixtures.GithubRepo, State: models.OpenPullState, Num: fixtures.Pull.Num} + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) + When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil) + fixtures.Pull.BaseRepo = fixtures.GithubRepo + ch.RunCommentCommand(fixtures.GithubRepo, nil, nil, fixtures.User, fixtures.Pull.Num, &events.CommentCommand{Name: command.Plan}) + pendingPlanFinder.VerifyWasCalledOnce().DeletePlans(tmp) + lockingLocker.VerifyWasCalledOnce().UnlockByPull(fixtures.Pull.BaseRepo.FullName, fixtures.Pull.Num) 
+} + +func TestRunSpecificPlanCommandDoesnt_DeletePlans(t *testing.T) { + setup(t) + tmp, cleanup := TempDir(t) + defer cleanup() + boltDB, err := db.New(tmp) + Ok(t, err) + dbUpdater.Backend = boltDB + applyCommandRunner.Backend = boltDB + autoMerger.GlobalAutomerge = true + defer func() { autoMerger.GlobalAutomerge = false }() + + When(projectCommandRunner.Plan(matchers.AnyModelsProjectCommandContext())).ThenReturn(command.ProjectResult{PlanSuccess: &models.PlanSuccess{}}) + When(workingDir.GetPullDir(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn(tmp, nil) + fixtures.Pull.BaseRepo = fixtures.GithubRepo + ch.RunCommentCommand(fixtures.GithubRepo, nil, nil, fixtures.User, fixtures.Pull.Num, &events.CommentCommand{Name: command.Plan, ProjectName: "default"}) + pendingPlanFinder.VerifyWasCalled(Never()).DeletePlans(tmp) +} + // Test that if one plan fails and we are using automerge, that // we delete the plans. -func TestRunAutoplanCommand_DeletePlans(t *testing.T) { +func TestRunAutoplanCommandWithError_DeletePlans(t *testing.T) { setup(t) tmp, cleanup := TempDir(t) defer cleanup() @@ -574,7 +646,8 @@ func TestRunAutoplanCommand_DeletePlans(t *testing.T) { ThenReturn(tmp, nil) fixtures.Pull.BaseRepo = fixtures.GithubRepo ch.RunAutoplanCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.Pull, fixtures.User) - pendingPlanFinder.VerifyWasCalledOnce().DeletePlans(tmp) + // gets called twice: the first time before the plan starts, the second time after the plan errors + pendingPlanFinder.VerifyWasCalled(Times(2)).DeletePlans(tmp) } func TestFailedApprovalCreatesFailedStatusUpdate(t *testing.T) { diff --git a/server/events/plan_command_runner.go b/server/events/plan_command_runner.go index 0e9d644124..7b3fb8c28b 100644 --- a/server/events/plan_command_runner.go +++ b/server/events/plan_command_runner.go @@ -1,6 +1,7 @@ package events import ( + "github.com/runatlantis/atlantis/server/core/locking" 
"github.com/runatlantis/atlantis/server/events/command" "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/vcs" @@ -22,6 +23,7 @@ func NewPlanCommandRunner( parallelPoolSize int, SilenceNoProjects bool, pullStatusFetcher PullStatusFetcher, + lockingLocker locking.Locker, ) *PlanCommandRunner { return &PlanCommandRunner{ silenceVCSStatusNoPlans: silenceVCSStatusNoPlans, @@ -39,6 +41,7 @@ func NewPlanCommandRunner( parallelPoolSize: parallelPoolSize, SilenceNoProjects: SilenceNoProjects, pullStatusFetcher: pullStatusFetcher, + lockingLocker: lockingLocker, } } @@ -64,6 +67,7 @@ type PlanCommandRunner struct { autoMerger *AutoMerger parallelPoolSize int pullStatusFetcher PullStatusFetcher + lockingLocker locking.Locker } func (p *PlanCommandRunner) runAutoplan(ctx *command.Context) { @@ -106,6 +110,14 @@ func (p *PlanCommandRunner) runAutoplan(ctx *command.Context) { ctx.Log.Warn("unable to update plan commit status: %s", err) } + // discard previous plans that might not be relevant anymore + ctx.Log.Debug("deleting previous plans and locks") + p.deletePlans(ctx) + _, err = p.lockingLocker.UnlockByPull(baseRepo.FullName, pull.Num) + if err != nil { + ctx.Log.Err("deleting locks: %s", err) + } + // Only run commands in parallel if enabled var result command.Result if p.isParallelEnabled(projectCmds) { @@ -180,6 +192,17 @@ func (p *PlanCommandRunner) run(ctx *command.Context, cmd *CommentCommand) { projectCmds, policyCheckCmds := p.partitionProjectCmds(ctx, projectCmds) + // if the plan is generic, new plans will be generated based on changes + // discard previous plans that might not be relevant anymore + if !cmd.IsForSpecificProject() { + ctx.Log.Debug("deleting previous plans and locks") + p.deletePlans(ctx) + _, err = p.lockingLocker.UnlockByPull(baseRepo.FullName, pull.Num) + if err != nil { + ctx.Log.Err("deleting locks: %s", err) + } + } + // Only run commands in parallel if enabled var result command.Result if 
p.isParallelEnabled(projectCmds) { diff --git a/server/server.go b/server/server.go index 90451b4b41..adf784df4f 100644 --- a/server/server.go +++ b/server/server.go @@ -639,6 +639,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { userConfig.ParallelPoolSize, userConfig.SilenceNoProjects, backend, + lockingClient, ) pullReqStatusFetcher := vcs.NewPullReqStatusFetcher(vcsClient)