diff --git a/server/events/command_runner.go b/server/events/command_runner.go
index 540d3b6002..54bffa8aff 100644
--- a/server/events/command_runner.go
+++ b/server/events/command_runner.go
@@ -262,6 +262,14 @@ func (c *DefaultCommandRunner) RunCommentCommand(baseRepo models.Repo, maybeHead
 		c.deletePlans(ctx)
 		result.PlansDeleted = true
 	}
+
+	// If this was a successful discard command, delete the plans for this pull request as well.
+	if cmd.Name == models.DiscardCommand && !result.HasErrors() {
+		c.deletePlans(ctx)
+		result.PlansDeleted = true
+	}
+
+	// TODO: check whether the pull request needs to be updated here for discard.
 	c.updatePull(
 		ctx,
 		cmd,
diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go
index 197b99579a..32725bf6bb 100644
--- a/server/events/markdown_renderer.go
+++ b/server/events/markdown_renderer.go
@@ -24,8 +24,9 @@ import (
 )
 
 const (
-	planCommandTitle  = "Plan"
-	applyCommandTitle = "Apply"
+	planCommandTitle    = "Plan"
+	applyCommandTitle   = "Apply"
+	discardCommandTitle = "Discard"
 	// maxUnwrappedLines is the maximum number of lines the Terraform output
 	// can be before we wrap it in an expandable template.
 	maxUnwrappedLines = 12
@@ -144,6 +145,8 @@ func (m *MarkdownRenderer) renderProjectResults(results []models.ProjectResult,
 				resultData.Rendered = m.renderTemplate(applyUnwrappedSuccessTmpl, struct{ Output string }{result.ApplySuccess})
 			}
+		} else if result.DiscardSuccess != "" {
+			resultData.Rendered = m.renderTemplate(discardUnwrappedSuccessTmpl, struct{ Output string }{result.DiscardSuccess})
 		} else {
 			resultData.Rendered = "Found no template. This is a bug!"
 		}
 
@@ -156,6 +159,8 @@ func (m *MarkdownRenderer) renderProjectResults(results []models.ProjectResult,
 		tmpl = singleProjectPlanSuccessTmpl
 	case len(resultsTmplData) == 1 && common.Command == planCommandTitle && numPlanSuccesses == 0:
 		tmpl = singleProjectPlanUnsuccessfulTmpl
+	case len(resultsTmplData) == 1 && common.Command == discardCommandTitle:
+		tmpl = singleProjectDiscardTmpl
 	case len(resultsTmplData) == 1 && common.Command == applyCommandTitle:
 		tmpl = singleProjectApplyTmpl
 	case common.Command == planCommandTitle:
@@ -265,6 +270,10 @@ var applyWrappedSuccessTmpl = template.Must(template.New("").Parse(
 		"{{.Output}}\n" +
 		"```\n" +
 		"</details>"))
+var discardUnwrappedSuccessTmpl = template.Must(template.New("").Parse(
+	"```diff\n" +
+		"{{.Output}}\n" +
+		"```"))
 var unwrappedErrTmplText = "**{{.Command}} Error**\n" +
 	"```\n" +
 	"{{.Error}}\n" +
diff --git a/server/events/models/models.go b/server/events/models/models.go
index 15792ef0df..265655f7f7 100644
--- a/server/events/models/models.go
+++ b/server/events/models/models.go
@@ -374,14 +374,15 @@ func SplitRepoFullName(repoFullName string) (owner string, repo string) {
 
 // ProjectResult is the result of executing a plan/apply for a specific project.
 type ProjectResult struct {
-	Command      CommandName
-	RepoRelDir   string
-	Workspace    string
-	Error        error
-	Failure      string
-	PlanSuccess  *PlanSuccess
-	ApplySuccess string
-	ProjectName  string
+	Command        CommandName
+	RepoRelDir     string
+	Workspace      string
+	Error          error
+	Failure        string
+	PlanSuccess    *PlanSuccess
+	ApplySuccess   string
+	DiscardSuccess string
+	ProjectName    string
 }
 
 // CommitStatus returns the vcs commit status of this project result.
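Note (reviewer sketch, not part of the patch): the new `discardCommandTitle` case above selects `singleProjectDiscardTmpl`, but this diff never defines that template, so the renderer won't compile as-is. A minimal definition patterned on the other single-project templates might look like the following; the `.Results`/`.Rendered` data shape is assumed from how those templates are used in `markdown_renderer.go` and should be verified there.

```go
// Assumed sketch only: mirrors the style of the existing single-project templates
// in markdown_renderer.go; the template data fields are assumptions.
var singleProjectDiscardTmpl = template.Must(template.New("").Parse(
	"{{$result := index .Results 0}}{{$result.Rendered}}\n"))
```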
diff --git a/server/events/project_command_runner.go b/server/events/project_command_runner.go
index fb647b6d5a..6f71d617e4 100644
--- a/server/events/project_command_runner.go
+++ b/server/events/project_command_runner.go
@@ -87,7 +87,7 @@ type ProjectCommandRunner interface {
 	Plan(ctx models.ProjectCommandContext) models.ProjectResult
 	// Apply runs terraform apply for the project described by ctx.
 	Apply(ctx models.ProjectCommandContext) models.ProjectResult
-	// Discard runs terraform discard for the project described by ctx.
+	// Discard discards the plan for the project described by ctx.
 	Discard(ctx models.ProjectCommandContext) models.ProjectResult
 }
 
@@ -137,15 +137,15 @@ func (p *DefaultProjectCommandRunner) Apply(ctx models.ProjectCommandContext) mo
 
 // Discard deletes the atlantis plan and discards the lock for the project described by ctx.
 func (p *DefaultProjectCommandRunner) Discard(ctx models.ProjectCommandContext) models.ProjectResult {
-	planSuccess, failure, err := p.doPlan(ctx)
+	discardOut, failure, err := p.doDiscard(ctx)
 	return models.ProjectResult{
-		Command:     models.PlanCommand,
-		PlanSuccess: planSuccess,
-		Error:       err,
-		Failure:     failure,
-		RepoRelDir:  ctx.RepoRelDir,
-		Workspace:   ctx.Workspace,
-		ProjectName: ctx.ProjectName,
+		Command:        models.DiscardCommand,
+		Error:          err,
+		Failure:        failure,
+		DiscardSuccess: discardOut,
+		RepoRelDir:     ctx.RepoRelDir,
+		Workspace:      ctx.Workspace,
+		ProjectName:    ctx.ProjectName,
 	}
 }
 
@@ -281,3 +281,68 @@ func (p *DefaultProjectCommandRunner) doApply(ctx models.ProjectCommandContext)
 	}
 	return strings.Join(outputs, "\n"), "", nil
 }
+
+func (p *DefaultProjectCommandRunner) doDiscard(ctx models.ProjectCommandContext) (discardOut string, failure string, err error) {
+	// Deleting the working dir is probably needed to prevent a later apply, but something
+	// more is needed so the user gets a "plan is required" message.
+	//if err := p.WorkingDir.Delete(ctx.BaseRepo, ctx.Pull); err != nil {
+	//	return "", "", errors.Wrap(err, "cleaning workspace")
+	//}
+
+	// TryLock is idempotent, so it's OK to run even if the lock already exists, which will normally be the case.
+	// TODO: expose a method to check whether a lock exists to begin with, and error if it doesn't.
+	ctx.Log.Debug("discard: attempting to acquire lock")
+	lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.BaseRepo.FullName, ctx.RepoRelDir))
+	if err != nil {
+		ctx.Log.Err("discard: failed to lock: %v", err)
+		return "", "", errors.Wrap(err, "acquiring lock")
+	}
+
+	if !lockAttempt.LockAcquired {
+		return "", lockAttempt.LockFailureReason, nil
+	}
+	ctx.Log.Debug("discard: acquired lock for project")
+	ctx.Log.Debug("discard: attempting to unlock project")
+	if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil {
+		ctx.Log.Err("discard: error unlocking project: %v", unlockErr)
+	}
+
+	/*
+		if ctx.BaseRepo != (models.Repo{}) {
+			unlock, err := p.WorkingDirLocker.TryLock(ctx.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace)
+			if err != nil {
+				ctx.Log.Err("unable to obtain working dir lock when trying to delete plans: %s", err)
+			} else {
+				defer unlock()
+				// nolint: vetshadow
+				if err := p.WorkingDir.DeleteForWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Workspace); err != nil {
+					ctx.Log.Err("unable to delete workspace: %s", err)
+				}
+			}
+
+			if err := p.DB.DeleteProjectStatus(lock.Pull, lock.Workspace, lock.Project.Path); err != nil {
+				l.Logger.Err("unable to delete project status: %s", err)
+			}
+
+			// Once the lock has been deleted, comment back on the pull request.
+			comment := fmt.Sprintf("**Warning**: The plan for dir: `%s` workspace: `%s` was **discarded** via the Atlantis UI.\n\n"+
+				"To `apply` this plan you must run `plan` again.", lock.Project.Path, lock.Workspace)
+			err = l.VCSClient.CreateComment(lock.Pull.BaseRepo, lock.Pull.Num, comment)
+			if err != nil {
+				l.respond(w, logging.Error, http.StatusInternalServerError, "Failed commenting on pull request: %s", err)
+				return
+			}
+
+		}
+	*/
+
+	return "discard successful", "", nil
+
+	// Finally, delete locks. We do this last because when someone
+	// unlocks a project, right now we don't actually delete the plan,
+	// so we might have plans lying around but no locks.
+	//locks, err := p.Locker(repo.FullName, pull.Num)
+	//if err != nil {
+	//	return nil, "", errors.Wrap(err, "cleaning up locks")
+	//}
+}
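For reference, here is one way the commented-out pieces could be assembled into a working `doDiscard` once the code copied from the locks controller is adapted to this context. This is a sketch under assumptions, not part of the patch: it assumes `DefaultProjectCommandRunner` has (or gains) the `WorkingDirLocker`, `WorkingDir`, and `DB` collaborators with the signatures used in the commented-out block, and it substitutes `ctx` values where that block referenced the controller's `lock` variable. Pull-request commenting is left to the normal comment-rendering path via the returned output.

```go
// Sketch of a completed doDiscard (package events). Assumes p.WorkingDirLocker,
// p.WorkingDir, and p.DB exist with the signatures referenced in the commented-out
// block above; none of this is verified against the rest of the codebase.
func (p *DefaultProjectCommandRunner) doDiscard(ctx models.ProjectCommandContext) (discardOut string, failure string, err error) {
	// Serialize access to this pull request's working directory.
	unlock, err := p.WorkingDirLocker.TryLock(ctx.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace)
	if err != nil {
		return "", "", errors.Wrap(err, "acquiring working dir lock")
	}
	defer unlock()

	// Delete the workspace so a later apply is forced to re-plan.
	if err := p.WorkingDir.DeleteForWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Workspace); err != nil {
		return "", "", errors.Wrap(err, "deleting workspace")
	}

	// Clear the stored project status for this plan (argument order assumed from the
	// commented-out block, which used the lock's Pull, Workspace, and Project.Path).
	if err := p.DB.DeleteProjectStatus(ctx.Pull, ctx.Workspace, ctx.RepoRelDir); err != nil {
		return "", "", errors.Wrap(err, "deleting project status")
	}

	// Release the project lock taken at plan time. TryLock is idempotent, so this
	// also covers the case where no lock was held.
	lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.BaseRepo.FullName, ctx.RepoRelDir))
	if err != nil {
		return "", "", errors.Wrap(err, "acquiring lock")
	}
	if !lockAttempt.LockAcquired {
		return "", lockAttempt.LockFailureReason, nil
	}
	if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil {
		ctx.Log.Err("discard: error unlocking project: %v", unlockErr)
	}

	return "Plan discarded. To apply, you must run plan again.", "", nil
}
```

A design note on the sketch: releasing the project lock last means a failure while deleting the workspace leaves the lock in place, so the pull request still shows as locked rather than silently losing its plan.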