diff --git a/circle.yml b/circle.yml index 57b320f754..32e5954a85 100644 --- a/circle.yml +++ b/circle.yml @@ -30,7 +30,7 @@ test: post: - cd "${WORKDIR}" && ./scripts/e2e-deps.sh # Start atlantis server - - cd "${WORKDIR}/e2e" && ./atlantis server --gh-user="$GITHUB_USERNAME" --gh-password="$GITHUB_PASSWORD" --data-dir="/tmp" --require-approval=false --s3-bucket="$ATLANTIS_S3_BUCKET_NAME" --log-level="debug" &> /tmp/atlantis-server.log: + - cd "${WORKDIR}/e2e" && ./atlantis server --gh-user="$GITHUB_USERNAME" --gh-password="$GITHUB_PASSWORD" --data-dir="/tmp" --require-approval=false --plan-backend="file" --log-level="debug" &> /tmp/atlantis-server.log: background: true - sleep 2 - cd "${WORKDIR}/e2e" && ./ngrok http 4141: diff --git a/cmd/root.go b/cmd/root.go index 930567769c..5eaf0d7d81 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -7,7 +7,7 @@ import ( var RootCmd = &cobra.Command{ Use: "atlantis", - Short: "Terraform collaboration tool", + Short: "Terraform collaboration tool", // todo: decide on name #opensource } func Execute() { diff --git a/cmd/server.go b/cmd/server.go index b0e4db79cf..b2ffe937a8 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -25,9 +25,11 @@ const ( lockingBackendFlag = "locking-backend" lockingTableFlag = "locking-dynamodb-table" logLevelFlag = "log-level" + planBackendFlag = "plan-backend" + planS3BucketFlag = "plan-s3-bucket" + planS3PrefixFlag = "plan-s3-prefix" portFlag = "port" requireApprovalFlag = "require-approval" - s3BucketFlag = "s3-bucket" scratchDirFlag = "scratch-dir" sshKeyFlag = "ssh-key" ) @@ -77,7 +79,7 @@ var stringFlags = []stringFlag{ }, { name: lockingTableFlag, - description: "Name of table in DynamoDB to use for locking. Only read if locking-backend is set to dynamodb.", + description: "Name of table in DynamoDB to use for locking. Only read if " + lockingBackendFlag + " is set to dynamodb.", value: "atlantis-locks", }, { @@ -86,10 +88,20 @@ var stringFlags = []stringFlag{ value: "warn", }, { - name: s3BucketFlag, - description: "The S3 bucket name to store atlantis data (terraform plans, terraform state, etc).", + name: planS3BucketFlag, + description: "S3 bucket for storing plan files. Only read if " + planBackendFlag + " is set to s3", value: "atlantis", }, + { + name: planS3PrefixFlag, + description: "Prefix of plan file names stored in S3. Only read if " + planBackendFlag + " is set to s3", + value: "", + }, + { + name: planBackendFlag, + description: "How to store plan files: file or s3. 
If set to file, will store plan files on disk in the directory specified by data-dir.", + value: "file", + }, { name: scratchDirFlag, description: "Path to directory to use as a temporary workspace for checking out repos.", @@ -147,7 +159,7 @@ Config values are overridden by environment variables which in turn are overridd if configFile != "" { viper.SetConfigFile(configFile) if err := viper.ReadInConfig(); err != nil { - return fmt.Errorf("invalid config: reading %s: %s", configFile, err) + return errors.Wrapf(err, "invalid config: reading %s", configFile) } } return nil @@ -212,6 +224,9 @@ func validate(config server.ServerConfig) error { if config.LockingBackend != server.LockingFileBackend && config.LockingBackend != server.LockingDynamoDBBackend { return fmt.Errorf("unsupported locking backend %q: not one of %q or %q", config.LockingBackend, server.LockingFileBackend, server.LockingDynamoDBBackend) } + if config.PlanBackend != server.PlanFileBackend && config.PlanBackend != server.PlanS3Backend { + return fmt.Errorf("unsupported plan backend %q: not one of %q or %q", config.PlanBackend, server.PlanFileBackend, server.PlanS3Backend) + } return nil } diff --git a/cmd/version.go b/cmd/version.go index d23669cf75..a64ba35e60 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -8,7 +8,7 @@ import ( var versionCmd = &cobra.Command{ Use: "version", - Short: "Print the current atlantis version", + Short: "Print the current Atlantis version", Run: func(cmd *cobra.Command, args []string) { fmt.Printf("atlantis %s\n", viper.Get("version")) }, diff --git a/e2e/main.go b/e2e/main.go index 40d00629ae..4ae9898fa9 100644 --- a/e2e/main.go +++ b/e2e/main.go @@ -14,8 +14,8 @@ import ( var defaultAtlantisURL = "http://localhost:4141" var projectTypes = []Project{ - Project{"standalone", "run plan", "run apply"}, - Project{"standalone-with-env", "run plan staging", "run apply staging"}, + {"standalone", "run plan", "run apply"}, + {"standalone-with-env", "run plan staging", "run apply staging"}, } type Project struct { diff --git a/e2e/secrets-envs b/e2e/secrets-envs index 83f1846239..048767d7ab 100644 Binary files a/e2e/secrets-envs and b/e2e/secrets-envs differ diff --git a/plan/backend.go b/plan/backend.go new file mode 100644 index 0000000000..3de19bc555 --- /dev/null +++ b/plan/backend.go @@ -0,0 +1,16 @@ +package plan + +import ( + "github.com/hootsuite/atlantis/models" +) + +type Backend interface { + SavePlan(path string, project models.Project, env string, pullNum int) error + CopyPlans(dstRepoPath string, repoFullName string, env string, pullNum int) ([]Plan, error) +} + +type Plan struct { + Project models.Project + // LocalPath is the path to the plan on disk + LocalPath string +} diff --git a/plan/file/file.go b/plan/file/file.go new file mode 100644 index 0000000000..be760093a3 --- /dev/null +++ b/plan/file/file.go @@ -0,0 +1,93 @@ +package file + +import ( + "github.com/hootsuite/atlantis/models" + "github.com/hootsuite/atlantis/plan" + "github.com/pkg/errors" + "io/ioutil" + "os" + "path/filepath" + "strconv" +) + +type Backend struct { + // baseDir is the root at which all plans will be stored + baseDir string +} + +func New(baseDir string) (*Backend, error) { + baseDir = filepath.Clean(baseDir) + if err := os.MkdirAll(baseDir, 0755); err != nil { + return nil, err + } + return &Backend{baseDir}, nil +} + +// save plans to baseDir/owner/repo/pullNum/path/env.tfplan +func (b *Backend) SavePlan(path string, project models.Project, env string, pullNum int) error { + savePath := 
b.path(project, pullNum) + if err := os.MkdirAll(savePath, 0755); err != nil { + return errors.Wrap(err, "creating save directory") + } + if err := b.copy(path, filepath.Join(savePath, env+".tfplan")); err != nil { + return errors.Wrap(err, "saving plan") + } + return nil +} + +func (b *Backend) CopyPlans(dstRepo string, repoFullName string, env string, pullNum int) ([]plan.Plan, error) { + // Look in the directory for this repo/pull and get plans for all projects. + // Then filter to the plans for this environment + var toCopy []string // will contain paths to the plan files relative to repo root + root := filepath.Join(b.baseDir, repoFullName, strconv.Itoa(pullNum)) + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + // if the plan is for the right env, + if info.Name() == env+".tfplan" { + rel, err := filepath.Rel(root, path) + if err == nil { + toCopy = append(toCopy, rel) + } + } + return nil + }) + + var plans []plan.Plan + if err != nil { + return plans, errors.Wrap(err, "listing plans") + } + + // copy the plans to the destination repo + for _, file := range toCopy { + dst := filepath.Join(dstRepo, file) + if err := b.copy(filepath.Join(root, file), dst); err != nil { + return plans, errors.Wrap(err, "copying plan") + } + plans = append(plans, plan.Plan{ + Project: models.Project{ + Path: filepath.Dir(file), + RepoFullName: repoFullName, + }, + LocalPath: dst, + }) + } + return plans, nil +} + +func (b *Backend) copy(src string, dst string) error { + data, err := ioutil.ReadFile(src) + if err != nil { + return errors.Wrapf(err, "reading %s", src) + } + + if err = ioutil.WriteFile(dst, data, 0644); err != nil { + return errors.Wrapf(err, "writing %s", dst) + } + return nil +} + +func (b *Backend) path(p models.Project, pullNum int) string { + return filepath.Join(b.baseDir, p.RepoFullName, strconv.Itoa(pullNum), p.Path) +} diff --git a/plan/s3/s3.go b/plan/s3/s3.go new file mode 100644 index 0000000000..29fd51217e --- /dev/null +++ b/plan/s3/s3.go @@ -0,0 +1,106 @@ +package s3 + +import ( + "os" + pathutil "path" + "path/filepath" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/hootsuite/atlantis/models" + "github.com/hootsuite/atlantis/plan" + "github.com/pkg/errors" +) + +type Backend struct { + s3 *s3.S3 + uploader *s3manager.Uploader + downloader *s3manager.Downloader + bucket string + keyPrefix string +} + +func New(p client.ConfigProvider, bucket string, keyPrefix string) *Backend { + return &Backend{ + s3: s3.New(p), + uploader: s3manager.NewUploader(p), + downloader: s3manager.NewDownloader(p), + bucket: bucket, + keyPrefix: keyPrefix, + } +} + +func (b *Backend) CopyPlans(repoDir string, repoFullName string, env string, pullNum int) ([]plan.Plan, error) { + // first list the plans with the correct prefix + prefix := pathutil.Join(b.keyPrefix, repoFullName, strconv.Itoa(pullNum)) + list, err := b.s3.ListObjects(&s3.ListObjectsInput{Bucket: aws.String(b.bucket), Prefix: &prefix}) + if err != nil { + return nil, errors.Wrap(err, "listing plans") + } + + var plans []plan.Plan + for _, obj := range list.Contents { + planName := pathutil.Base(*obj.Key) + + // only get plans from the correct env + if planName != env+".tfplan" { + continue + } + + // determine the path relative to the repo + relPath, err := filepath.Rel(prefix, *obj.Key) + if err != nil { + continue + } 
+ downloadPath := filepath.Join(repoDir, relPath) + file, err := os.Create(downloadPath) + if err != nil { + return nil, errors.Wrapf(err, "creating file %s to download plan to", downloadPath) + } + defer file.Close() + + _, err = b.downloader.Download(file, + &s3.GetObjectInput{ + Bucket: aws.String(b.bucket), + Key: obj.Key, + }) + if err != nil { + return nil, errors.Wrapf(err, "downloading file at %s", *obj.Key) + } + plans = append(plans, plan.Plan{ + Project: models.Project{ + Path: pathutil.Dir(relPath), + RepoFullName: repoFullName, + }, + LocalPath: downloadPath, + }) + } + return plans, nil +} + +func (b *Backend) SavePlan(path string, project models.Project, env string, pullNum int) error { + f, err := os.Open(path) + if err != nil { + return errors.Wrapf(err, "opening plan at %s", path) + } + + key := pathutil.Join(b.keyPrefix, project.RepoFullName, strconv.Itoa(pullNum), project.Path, env+".tfplan") + _, err = b.uploader.Upload(&s3manager.UploadInput{ + Bucket: aws.String(b.bucket), + Key: &key, + Body: f, + Metadata: map[string]*string{ + "repoFullName": aws.String(project.RepoFullName), + "path": aws.String(project.Path), + "env": aws.String(env), + "pullNum": aws.String(strconv.Itoa(pullNum)), + }, + }) + if err != nil { + return errors.Wrap(err, "uploading plan to s3") + } + return nil +} diff --git a/server/apply_executor.go b/server/apply_executor.go index c7ea5404be..fe9125f579 100644 --- a/server/apply_executor.go +++ b/server/apply_executor.go @@ -4,17 +4,12 @@ import ( "errors" "fmt" "os" - "path" - "regexp" "strings" "path/filepath" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/hootsuite/atlantis/locking" - "github.com/hootsuite/atlantis/models" + "github.com/hootsuite/atlantis/plan" "strconv" ) @@ -22,12 +17,12 @@ type ApplyExecutor struct { github *GithubClient awsConfig *AWSConfig scratchDir string - s3Bucket string sshKey string terraform *TerraformClient githubCommentRenderer *GithubCommentRenderer lockingClient *locking.Client requireApproval bool + planStorage plan.Backend } /** Result Types **/ @@ -62,6 +57,7 @@ func (n NoPlansFailure) Template() *CompiledTemplate { } func (a *ApplyExecutor) execute(ctx *CommandContext, github *GithubClient) { + a.github.UpdateStatus(ctx.Repo, ctx.Pull, Pending, "Applying...") res := a.setupAndApply(ctx) res.Command = Apply comment := a.githubCommentRenderer.render(res, ctx.Log.History.String(), ctx.Command.verbose) @@ -69,79 +65,60 @@ func (a *ApplyExecutor) execute(ctx *CommandContext, github *GithubClient) { } func (a *ApplyExecutor) setupAndApply(ctx *CommandContext) ExecutionResult { - a.github.UpdateStatus(ctx.Repo, ctx.Pull, PendingStatus, "Applying...") - - if a.requireApproval { - ok, err := a.github.PullIsApproved(ctx.Repo, ctx.Pull) - if err != nil { - msg := fmt.Sprintf("failed to determine if pull request was approved: %v", err) - ctx.Log.Err(msg) - a.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Apply Error") - return ExecutionResult{SetupError: GeneralError{errors.New(msg)}} - } - if !ok { - ctx.Log.Info("pull request was not approved") - a.github.UpdateStatus(ctx.Repo, ctx.Pull, FailureStatus, "Apply Failed") - return ExecutionResult{SetupFailure: PullNotApprovedFailure{}} - } + if approved, res := a.isApproved(ctx); !approved { + return res } - planPaths, err := a.downloadPlans(ctx.Repo.FullName, ctx.Pull.Num, ctx.Command.environment, a.scratchDir, a.awsConfig, a.s3Bucket) + // todo: reclone repo and switch 
branch, don't assume it's already there + repoDir := filepath.Join(a.scratchDir, ctx.Repo.FullName, strconv.Itoa(ctx.Pull.Num)) + plans, err := a.planStorage.CopyPlans(repoDir, ctx.Repo.FullName, ctx.Command.environment, ctx.Pull.Num) if err != nil { - errMsg := fmt.Sprintf("failed to download plans: %v", err) + errMsg := fmt.Sprintf("failed to get plans: %s", err) ctx.Log.Err(errMsg) - a.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Apply Error") + a.github.UpdateStatus(ctx.Repo, ctx.Pull, Error, "Apply Error") return ExecutionResult{SetupError: GeneralError{errors.New(errMsg)}} } - - // If there are no plans found for the pull request - if len(planPaths) == 0 { + if len(plans) == 0 { failure := "found 0 plans for this pull request" ctx.Log.Warn(failure) - a.github.UpdateStatus(ctx.Repo, ctx.Pull, FailureStatus, "Apply Failure") + a.github.UpdateStatus(ctx.Repo, ctx.Pull, Failure, "Apply Failure") return ExecutionResult{SetupFailure: NoPlansFailure{}} } - //runLog = append(runLog, fmt.Sprintf("-> Downloaded plans: %v", planPaths)) applyOutputs := []PathResult{} - for _, planPath := range planPaths { - output := a.apply(ctx, planPath) - output.Path = planPath + for _, plan := range plans { + output := a.apply(ctx, repoDir, plan) + output.Path = plan.LocalPath applyOutputs = append(applyOutputs, output) + } a.updateGithubStatus(ctx, applyOutputs) return ExecutionResult{PathResults: applyOutputs} } -func (a *ApplyExecutor) apply(ctx *CommandContext, planPath string) PathResult { - planName := path.Base(planPath) - planSubDir := a.determinePlanSubDir(planName, ctx.Pull.Num) - // todo: don't assume repo is cloned here - repoDir := filepath.Join(a.scratchDir, ctx.Repo.FullName, strconv.Itoa(ctx.Pull.Num)) - planDir := filepath.Join(repoDir, planSubDir) - project := models.NewProject(ctx.Repo.FullName, planSubDir) - execPath := NewExecutionPath(planDir, planSubDir) +func (a *ApplyExecutor) apply(ctx *CommandContext, repoDir string, plan plan.Plan) PathResult { var config Config var remoteStatePath string // check if config file is found, if not we continue the run - if config.Exists(execPath.Absolute) { - ctx.Log.Info("Config file found in %s", execPath.Absolute) - err := config.Read(execPath.Absolute) + projectAbsolutePath := filepath.Dir(plan.LocalPath) + if config.Exists(projectAbsolutePath) { + ctx.Log.Info("Config file found in %s", projectAbsolutePath) + err := config.Read(projectAbsolutePath) if err != nil { msg := fmt.Sprintf("Error reading config file: %v", err) ctx.Log.Err(msg) return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{errors.New(msg)}, } } // need to use the remote state path and backend to do remote configure - err = PreRun(&config, ctx.Log, execPath.Absolute, ctx.Command) + err = PreRun(&config, ctx.Log, projectAbsolutePath, ctx.Command) if err != nil { msg := fmt.Sprintf("pre run failed: %v", err) ctx.Log.Err(msg) return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{errors.New(msg)}, } } @@ -156,12 +133,12 @@ func (a *ApplyExecutor) apply(ctx *CommandContext, planPath string) PathResult { // NOTE: THIS CODE IS TO SUPPORT TERRAFORM PROJECTS THAT AREN'T USING ATLANTIS CONFIG FILE. 
if config.StashPath == "" { // configure remote state - statePath, err := a.terraform.ConfigureRemoteState(ctx.Log, repoDir, project, ctx.Command.environment, a.sshKey) + statePath, err := a.terraform.ConfigureRemoteState(ctx.Log, repoDir, plan.Project, ctx.Command.environment, a.sshKey) if err != nil { msg := fmt.Sprintf("failed to set up remote state: %v", err) ctx.Log.Err(msg) return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{errors.New(msg)}, } } @@ -177,16 +154,16 @@ func (a *ApplyExecutor) apply(ctx *CommandContext, planPath string) PathResult { tfEnv = "default" } - lockAttempt, err := a.lockingClient.TryLock(project, tfEnv, ctx.Pull.Num) + lockAttempt, err := a.lockingClient.TryLock(plan.Project, tfEnv, ctx.Pull.Num) if err != nil { return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{fmt.Errorf("failed to acquire lock: %s", err)}, } } if lockAttempt.LockAcquired != true && lockAttempt.LockingPullNum != ctx.Pull.Num { return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{fmt.Errorf("failed to acquire lock: lock held by pull request #%d", lockAttempt.LockingPullNum)}, } } @@ -199,7 +176,7 @@ func (a *ApplyExecutor) apply(ctx *CommandContext, planPath string) PathResult { if err != nil { ctx.Log.Err(err.Error()) return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{err}, } } @@ -209,123 +186,58 @@ func (a *ApplyExecutor) apply(ctx *CommandContext, planPath string) PathResult { msg := fmt.Sprintf("failed to get assumed role credentials: %v", err) ctx.Log.Err(msg) return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{errors.New(msg)}, } } - ctx.Log.Info("running apply from %q", execPath.Relative) - - terraformApplyCmdArgs, output, err := a.terraform.RunTerraformCommand(execPath.Absolute, []string{"apply", "-no-color", planPath}, []string{ + ctx.Log.Info("running apply from %q", plan.Project.Path) + terraformApplyCmdArgs, output, err := a.terraform.RunTerraformCommand(projectAbsolutePath, []string{"apply", "-no-color", plan.LocalPath}, []string{ fmt.Sprintf("AWS_ACCESS_KEY_ID=%s", credVals.AccessKeyID), fmt.Sprintf("AWS_SECRET_ACCESS_KEY=%s", credVals.SecretAccessKey), fmt.Sprintf("AWS_SESSION_TOKEN=%s", credVals.SessionToken), }) - //runLog = append(runLog, "```\n Apply output:\n", fmt.Sprintf("```bash\n%s\n", string(out[:]))) if err != nil { ctx.Log.Err("failed to apply: %v %s", err, output) return PathResult{ - Status: "failure", + Status: Failure, Result: ApplyFailure{Command: strings.Join(terraformApplyCmdArgs, " "), Output: output, ErrorMessage: err.Error()}, } } // clean up, delete local plan file - os.Remove(execPath.Absolute) // swallow errors, okay if we failed to delete + os.Remove(plan.LocalPath) // swallow errors, okay if we failed to delete return PathResult{ - Status: "success", + Status: Success, Result: ApplySuccess{output}, } } -func (a *ApplyExecutor) downloadPlans(repoFullName string, pullNum int, env string, outputDir string, awsConfig *AWSConfig, s3Bucket string) (planPaths []string, err error) { - awsSession, err := awsConfig.CreateAWSSession() - if err != nil { - return nil, fmt.Errorf("failed to assume role: %v", err) - } - - // now use the assumed role to download all the plans - s3Client := s3.New(awsSession) - - // this will be plans/owner/repo/owner_repo_1, may be more than one if there are subdirs or multiple envs - plansPath := fmt.Sprintf("plans/%s/%s_%d", repoFullName, strings.Replace(repoFullName, "/", "_", -1), pullNum) - list, err := 
s3Client.ListObjects(&s3.ListObjectsInput{Bucket: aws.String(s3Bucket), Prefix: &plansPath}) - if err != nil { - return nil, fmt.Errorf("failed to list plans in path: %v", err) - } - - for _, obj := range list.Contents { - planName := path.Base(*obj.Key) - // filter to plans for the right env, plan names have the format owner_repo_pullNum_optional_sub_dirs.tfvars.env - if !strings.HasSuffix(planName, env) { - continue - } - // will be something like /tmp/owner_repo_pullNum_optional_sub_dirs.tfvars.env - outputPath := fmt.Sprintf("%s/%s", outputDir, planName) - file, err := os.Create(outputPath) - if err != nil { - return nil, fmt.Errorf("failed to create file to write plan to: %v", err) - } - defer file.Close() - - downloader := s3manager.NewDownloader(awsSession) - _, err = downloader.Download(file, - &s3.GetObjectInput{ - Bucket: aws.String(s3Bucket), - Key: obj.Key, - }) - if err != nil { - return nil, fmt.Errorf("failed to download plan from s3: %v", err) - } - planPaths = append(planPaths, outputPath) +func (a *ApplyExecutor) updateGithubStatus(ctx *CommandContext, pathResults []PathResult) { + var statuses []Status + for _, p := range pathResults { + statuses = append(statuses, p.Status) } - return planPaths, nil + worst := WorstStatus(statuses) + a.github.UpdateStatus(ctx.Repo, ctx.Pull, worst, "Apply "+worst.String()) } -func (a *ApplyExecutor) determinePlanSubDir(planName string, pullNum int) string { - planDirRegex := fmt.Sprintf(`.*_%d_(.*?)\.`, pullNum) - regex := regexp.MustCompile(planDirRegex) // we assume this will compile - match := regex.FindStringSubmatch(planName) - if len(match) <= 1 { - return "." +func (a *ApplyExecutor) isApproved(ctx *CommandContext) (bool, ExecutionResult) { + // approval not required, so treat the pull request as approved and continue + if !a.requireApproval { + return true, ExecutionResult{} } - dirsStr := match[1] // in form dir_subdir_subsubdir - return filepath.Clean(strings.Replace(dirsStr, "_", "/", -1)) -} -func (a *ApplyExecutor) updateGithubStatus(ctx *CommandContext, pathResults []PathResult) { - // the status will be the worst result - worstResult := a.worstResult(pathResults) - if worstResult == "success" { - a.github.UpdateStatus(ctx.Repo, ctx.Pull, SuccessStatus, "Apply Succeeded") - } else if worstResult == "failure" { - a.github.UpdateStatus(ctx.Repo, ctx.Pull, FailureStatus, "Apply Failed") - } else { - a.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Apply Error") + ok, err := a.github.PullIsApproved(ctx.Repo, ctx.Pull) + if err != nil { + msg := fmt.Sprintf("failed to determine if pull request was approved: %v", err) + ctx.Log.Err(msg) + a.github.UpdateStatus(ctx.Repo, ctx.Pull, Error, "Apply Error") + return false, ExecutionResult{SetupError: GeneralError{errors.New(msg)}} } -} - -func (a *ApplyExecutor) worstResult(results []PathResult) string { - var worst string = "success" - for _, result := range results { - if result.Status == "error" { - return result.Status - } else if result.Status == "failure" { - worst = result.Status - } + if !ok { + ctx.Log.Info("pull request was not approved") + a.github.UpdateStatus(ctx.Repo, ctx.Pull, Failure, "Apply Failed") + return false, ExecutionResult{SetupFailure: PullNotApprovedFailure{}} } - return worst -} - -type ExecutionPath struct { - // Absolute is the full path on the OS where we will execute. - // Will never end with a '/'. - Absolute string - // Relative is the path relative to the repo root. - // Will never end with a '/'. 
- Relative string -} - -func NewExecutionPath(absolutePath string, relativePath string) ExecutionPath { - return ExecutionPath{filepath.Clean(absolutePath), filepath.Clean(relativePath)} + return true, ExecutionResult{} } diff --git a/server/command_handler.go b/server/command_handler.go new file mode 100644 index 0000000000..b41598e055 --- /dev/null +++ b/server/command_handler.go @@ -0,0 +1,74 @@ +package server + +import ( + "fmt" + "github.com/hootsuite/atlantis/logging" + "github.com/hootsuite/atlantis/recovery" +) + +type CommandHandler struct { + planExecutor *PlanExecutor + applyExecutor *ApplyExecutor + helpExecutor *HelpExecutor + githubClient *GithubClient + eventParser *EventParser + logger *logging.SimpleLogger +} + +type CommandType int + +const ( + Apply CommandType = iota + Plan + Help +) + +type Command struct { + verbose bool + environment string + commandType CommandType +} + +func (s *CommandHandler) ExecuteCommand(ctx *CommandContext) { + src := fmt.Sprintf("%s/pull/%d", ctx.Repo.FullName, ctx.Pull.Num) + // it's safe to reuse the underlying logger s.logger.Log + ctx.Log = logging.NewSimpleLogger(src, s.logger.Log, true, s.logger.Level) + defer s.recover(ctx) + + // need to get additional data from the PR + ghPull, _, err := s.githubClient.GetPullRequest(ctx.Repo, ctx.Pull.Num) + if err != nil { + ctx.Log.Err("pull request data api call failed: %v", err) + return + } + pull, err := s.eventParser.ExtractPullData(ghPull) + if err != nil { + ctx.Log.Err("failed to extract required fields from comment data: %v", err) + return + } + ctx.Pull = pull + + switch ctx.Command.commandType { + case Plan: + s.planExecutor.execute(ctx, s.githubClient) + case Apply: + s.applyExecutor.execute(ctx, s.githubClient) + case Help: + s.helpExecutor.execute(ctx, s.githubClient) + default: + ctx.Log.Err("failed to determine desired command, neither plan nor apply") + } +} + +func (s *CommandHandler) SetDeleteLockURL(f func(id string) (url string)) { + s.planExecutor.DeleteLockURL = f +} + +// recover logs and creates a comment on the pull request for panics +func (s *CommandHandler) recover(ctx *CommandContext) { + if err := recover(); err != nil { + stack := recovery.Stack(3) + s.githubClient.CreateComment(ctx, fmt.Sprintf("**Error: goroutine panic. 
This is a bug.**\n```\n%s\n%s```", err, stack)) + ctx.Log.Err("PANIC: %s\n%s", err, stack) + } +} diff --git a/server/request_parser.go b/server/event_parser.go similarity index 79% rename from server/request_parser.go rename to server/event_parser.go index df9d24117c..4d4515246c 100644 --- a/server/request_parser.go +++ b/server/event_parser.go @@ -8,23 +8,9 @@ import ( "regexp" ) -type RequestParser struct{} +type EventParser struct{} -type CommandType int - -const ( - Apply CommandType = iota - Plan - Help -) - -type Command struct { - verbose bool - environment string - commandType CommandType -} - -func (r *RequestParser) DetermineCommand(comment *github.IssueCommentEvent) (*Command, error) { +func (e *EventParser) DetermineCommand(comment *github.IssueCommentEvent) (*Command, error) { // for legacy, also support "run" instead of atlantis atlantisCommentRegex := `^(?:run|atlantis) (plan|apply|help)([[:blank:]])?([a-zA-Z0-9_-]+)?\s*(--verbose)?$` runPlanMatcher := regexp.MustCompile(atlantisCommentRegex) @@ -62,7 +48,7 @@ func (r *RequestParser) DetermineCommand(comment *github.IssueCommentEvent) (*Co return command, nil } -func (r *RequestParser) ExtractCommentData(comment *github.IssueCommentEvent, ctx *CommandContext) error { +func (e *EventParser) ExtractCommentData(comment *github.IssueCommentEvent, ctx *CommandContext) error { repoFullName := comment.Repo.GetFullName() if repoFullName == "" { return errors.New("repository.full_name is null") @@ -110,38 +96,38 @@ func (r *RequestParser) ExtractCommentData(comment *github.IssueCommentEvent, ct return nil } -func (r *RequestParser) ExtractPullData(pull *github.PullRequest, params *CommandContext) error { +func (e *EventParser) ExtractPullData(pull *github.PullRequest) (models.PullRequest, error) { + var pullModel models.PullRequest commit := pull.Head.GetSHA() if commit == "" { - return errors.New("head.sha is null") + return pullModel, errors.New("head.sha is null") } base := pull.Base.GetSHA() if base == "" { - return errors.New("base.sha is null") + return pullModel, errors.New("base.sha is null") } pullLink := pull.GetHTMLURL() if pullLink == "" { - return errors.New("html_url is null") + return pullModel, errors.New("html_url is null") } branch := pull.Head.GetRef() if branch == "" { - return errors.New("head.ref is null") + return pullModel, errors.New("head.ref is null") } authorUsername := pull.User.GetLogin() if authorUsername == "" { - return errors.New("user.login is null") + return pullModel, errors.New("user.login is null") } num := pull.GetNumber() if num == 0 { - return errors.New("number is null") + return pullModel, errors.New("number is null") } - params.Pull = models.PullRequest{ + return models.PullRequest{ BaseCommit: base, Author: authorUsername, Branch: branch, HeadCommit: commit, Link: pullLink, Num: num, - } - return nil + }, nil } diff --git a/server/github_client.go b/server/github_client.go index 49c583f379..2b9d26a823 100644 --- a/server/github_client.go +++ b/server/github_client.go @@ -2,9 +2,9 @@ package server import ( "context" - "fmt" "github.com/google/go-github/github" "github.com/hootsuite/atlantis/models" + "github.com/pkg/errors" ) type GithubClient struct { @@ -14,16 +14,47 @@ type GithubClient struct { const ( statusContext = "Atlantis" - PendingStatus = "pending" - SuccessStatus = "success" - ErrorStatus = "error" - FailureStatus = "failure" ) -func (g *GithubClient) UpdateStatus(repo models.Repo, pull models.PullRequest, status string, description string) { - repoStatus := 
github.RepoStatus{State: github.String(status), Description: github.String(description), Context: github.String(statusContext)} +type Status int + +const ( + Pending Status = iota + Success + Failure + Error +) + +func (s Status) String() string { + switch s { + case Pending: + return "pending" + case Success: + return "success" + case Failure: + return "failure" + case Error: + return "error" + } + return "error" +} + +func WorstStatus(ss []Status) Status { + if len(ss) == 0 { + return Success + } + worst := Success + for _, s := range ss { + if s > worst { + worst = s + } + } + return worst +} + +func (g *GithubClient) UpdateStatus(repo models.Repo, pull models.PullRequest, status Status, description string) { + repoStatus := github.RepoStatus{State: github.String(status.String()), Description: github.String(description), Context: github.String(statusContext)} g.client.Repositories.CreateStatus(g.ctx, repo.Owner, repo.Name, pull.HeadCommit, &repoStatus) - // todo: deal with error updating status } // GetModifiedFiles returns the names of files that were modified in the pull request. @@ -46,22 +77,12 @@ func (g *GithubClient) CreateComment(ctx *CommandContext, comment string) error } func (g *GithubClient) PullIsApproved(repo models.Repo, pull models.PullRequest) (bool, error) { - // todo: move back to using g.client.PullRequests.ListReviews when we update our GitHub enterprise version - // to where we don't need to include the custom accept header - u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", repo.Owner, repo.Name, pull.Num) - req, err := g.client.NewRequest("GET", u, nil) - if err != nil { - return false, err - } - req.Header.Set("Accept", "application/vnd.github.black-cat-preview+json") - - var reviews []*github.PullRequestReview - _, err = g.client.Do(g.ctx, req, &reviews) + reviews, _, err := g.client.PullRequests.ListReviews(g.ctx, repo.Owner, repo.Name, pull.Num, nil) if err != nil { - return false, fmt.Errorf("failed to retrieve reviews: %v", err) + return false, errors.Wrap(err, "getting reviews") } for _, review := range reviews { - if review != nil && review.State != nil && *review.State == "APPROVED" { + if review != nil && review.GetState() == "APPROVED" { return true, nil } } diff --git a/server/github_comment_renderer.go b/server/github_comment_renderer.go index ea1085ac1d..a5c6466040 100644 --- a/server/github_comment_renderer.go +++ b/server/github_comment_renderer.go @@ -144,7 +144,7 @@ func (g *GithubCommentRenderer) render(res ExecutionResult, log string, verbose } else { hasErrors := false for _, res := range res.PathResults { - if res.Status == "error" { + if res.Status == Error { hasErrors = true } } diff --git a/server/plan_executor.go b/server/plan_executor.go index a4f40f25c0..6abf9790a6 100644 --- a/server/plan_executor.go +++ b/server/plan_executor.go @@ -6,6 +6,7 @@ import ( "github.com/hootsuite/atlantis/locking" "github.com/hootsuite/atlantis/logging" "github.com/hootsuite/atlantis/models" + "github.com/hootsuite/atlantis/plan" "io/ioutil" "os" "os/exec" @@ -26,6 +27,7 @@ type PlanExecutor struct { lockingClient *locking.Client // DeleteLockURL is a function that given a lock id will return a url for deleting the lock DeleteLockURL func(id string) (url string) + planStorage plan.Backend } /** Result Types **/ @@ -77,7 +79,7 @@ func (p *PlanExecutor) execute(ctx *CommandContext, github *GithubClient) { } func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { - p.github.UpdateStatus(ctx.Repo, ctx.Pull, "pending", "Planning...") + 
p.github.UpdateStatus(ctx.Repo, ctx.Pull, Pending, "Planning...") // todo: lock when cloning or somehow separate workspaces // clean the directory where we're going to clone @@ -100,7 +102,7 @@ func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { if err != nil { errMsg := fmt.Sprintf("failed to create git ssh wrapper: %v", err) ctx.Log.Err(errMsg) - p.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Plan Error") + p.github.UpdateStatus(ctx.Repo, ctx.Pull, Error, "Plan Error") return ExecutionResult{SetupError: GeneralError{errors.New(errMsg)}} } @@ -115,7 +117,7 @@ func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { if output, err := cloneCmd.CombinedOutput(); err != nil { errMsg := fmt.Sprintf("failed to clone repository %q: %v: %s", ctx.Repo.SSHURL, err, string(output)) ctx.Log.Err(errMsg) - p.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Plan Error") + p.github.UpdateStatus(ctx.Repo, ctx.Pull, Error, "Plan Error") return ExecutionResult{SetupError: GeneralError{errors.New(errMsg)}} } @@ -126,7 +128,7 @@ func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { if err := checkoutCmd.Run(); err != nil { errMsg := fmt.Sprintf("failed to git checkout branch %q: %v", ctx.Pull.Branch, err) ctx.Log.Err(errMsg) - p.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Plan Error") + p.github.UpdateStatus(ctx.Repo, ctx.Pull, Error, "Plan Error") return ExecutionResult{SetupError: GeneralError{errors.New(errMsg)}} } @@ -135,13 +137,13 @@ func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { if err != nil { errMsg := fmt.Sprintf("failed to retrieve list of modified files from GitHub: %v", err) ctx.Log.Err(errMsg) - p.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Plan Error") + p.github.UpdateStatus(ctx.Repo, ctx.Pull, Error, "Plan Error") return ExecutionResult{SetupError: GeneralError{errors.New(errMsg)}} } modifiedTerraformFiles := p.filterToTerraform(modifiedFiles) if len(modifiedTerraformFiles) == 0 { ctx.Log.Info("no modified terraform files found, exiting") - p.github.UpdateStatus(ctx.Repo, ctx.Pull, FailureStatus, "Plan Failed") + p.github.UpdateStatus(ctx.Repo, ctx.Pull, Failure, "Plan Failed") return ExecutionResult{SetupError: GeneralError{errors.New("Plan Failed: no modified terraform files found")}} } ctx.Log.Debug("Found %d modified terraform files: %v", len(modifiedTerraformFiles), modifiedTerraformFiles) @@ -149,7 +151,7 @@ func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { projects := p.ModifiedProjects(ctx.Repo.FullName, modifiedTerraformFiles) if len(projects) == 0 { ctx.Log.Info("no Terraform projects were modified") - p.github.UpdateStatus(ctx.Repo, ctx.Pull, FailureStatus, "Plan Failed") + p.github.UpdateStatus(ctx.Repo, ctx.Pull, Failure, "Plan Failed") return ExecutionResult{SetupError: GeneralError{errors.New("Plan Failed: we determined that no terraform projects were modified")}} } @@ -157,19 +159,13 @@ func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { if err := p.CleanWorkspace(ctx.Log, planFilesPrefix, p.scratchDir, cloneDir, projects); err != nil { errMsg := fmt.Sprintf("failed to clean workspace, aborting: %v", err) ctx.Log.Err(errMsg) - p.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Plan Error") + p.github.UpdateStatus(ctx.Repo, ctx.Pull, Error, "Plan Error") return ExecutionResult{SetupError: GeneralError{errors.New(errMsg)}} } - s3Client := NewS3Client(p.awsConfig, p.s3Bucket, "plans") var config Config - 
// run `terraform plan` in each plan path and collect the results planOutputs := []PathResult{} for _, project := range projects { - // todo: not sure it makes sense to be generating the output filename and plan name here - tfPlanFilename := p.GenerateOutputFilename(project, ctx.Command.environment) - tfPlanName := fmt.Sprintf("%s_%d%s", strings.Replace(ctx.Repo.FullName, "/", "_", -1), ctx.Pull.Num, tfPlanFilename) - s3Key := fmt.Sprintf("%s/%s", ctx.Repo.FullName, tfPlanName) // check if config file is found, if not we continue the run absolutePath := filepath.Join(cloneDir, project.Path) if config.Exists(absolutePath) { @@ -195,7 +191,7 @@ func (p *PlanExecutor) setupAndPlan(ctx *CommandContext) ExecutionResult { p.terraform.tfExecutableName = "terraform" } } - generatePlanResponse := p.plan(ctx, cloneDir, p.scratchDir, tfPlanName, s3Client, project, s3Key, p.sshKey, config.StashPath) + generatePlanResponse := p.plan(ctx, cloneDir, p.scratchDir, project, p.sshKey, config.StashPath) generatePlanResponse.Path = project.Path planOutputs = append(planOutputs, generatePlanResponse) } @@ -209,10 +205,7 @@ func (p *PlanExecutor) plan( ctx *CommandContext, repoDir string, planOutDir string, - tfPlanName string, - s3Client S3Client, project models.Project, - s3Key string, sshKey string, stashPath string) PathResult { ctx.Log.Info("generating plan for path %q", project.Path) @@ -222,7 +215,7 @@ func (p *PlanExecutor) plan( _, err := p.terraform.ConfigureRemoteState(ctx.Log, repoDir, project, ctx.Command.environment, sshKey) if err != nil { return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{fmt.Errorf("failed to configure remote state: %s", err)}, } } @@ -235,7 +228,7 @@ func (p *PlanExecutor) plan( lockAttempt, err := p.lockingClient.TryLock(project, ctx.Command.environment, ctx.Pull.Num) if err != nil { return PathResult{ - Status: " failure", + Status: Failure, Result: GeneralError{fmt.Errorf("failed to lock state: %v", err)}, } } @@ -243,7 +236,7 @@ func (p *PlanExecutor) plan( // the run is locked unless the locking run is the same pull id as this run if lockAttempt.LockAcquired == false && lockAttempt.LockingPullNum != ctx.Pull.Num { return PathResult{ - Status: "failure", + Status: Failure, Result: RunLockedFailure{lockAttempt.LockingPullNum}, } } @@ -251,22 +244,20 @@ func (p *PlanExecutor) plan( // Run terraform plan ctx.Log.Info("running terraform plan in directory %q", project.Path) tfPlanCmd := []string{"plan", "-refresh", "-no-color"} - // Generate terraform plan filename - tfPlanOutputPath := filepath.Join(planOutDir, tfPlanName) - // Generate terraform plan arguments + planFile := filepath.Join(repoDir, project.Path, fmt.Sprintf("%s.tfplan", tfEnv)) if ctx.Command.environment != "" { tfEnvFileName := filepath.Join("env", ctx.Command.environment+".tfvars") if _, err := os.Stat(filepath.Join(repoDir, project.Path, tfEnvFileName)); err == nil { - tfPlanCmd = append(tfPlanCmd, "-var-file", tfEnvFileName, "-out", tfPlanOutputPath) + tfPlanCmd = append(tfPlanCmd, "-var-file", tfEnvFileName, "-out", planFile) } else { ctx.Log.Err("environment file %q not found", tfEnvFileName) return PathResult{ - Status: "failure", + Status: Failure, Result: EnvironmentFileNotFoundFailure{tfEnvFileName}, } } } else { - tfPlanCmd = append(tfPlanCmd, "-out", tfPlanOutputPath) + tfPlanCmd = append(tfPlanCmd, "-out", planFile) } // set pull request creator as the session name @@ -275,7 +266,7 @@ func (p *PlanExecutor) plan( if err != nil { ctx.Log.Err(err.Error()) return PathResult{ - 
Status: "error", + Status: Error, Result: GeneralError{err}, } } @@ -285,7 +276,7 @@ func (p *PlanExecutor) plan( err = fmt.Errorf("failed to get assumed role credentials: %v", err) ctx.Log.Err(err.Error()) return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{err}, } } @@ -310,32 +301,31 @@ func (p *PlanExecutor) plan( ctx.Log.Err("error unlocking state: %v", err) } return PathResult{ - Status: "failure", + Status: Failure, Result: err, } } - // Upload plan to S3 - ctx.Log.Info("uploading plan to S3 with key %q", s3Key) - if err := UploadPlanFile(s3Client, s3Key, tfPlanOutputPath); err != nil { - err = fmt.Errorf("failed to upload to S3: %v", err) - ctx.Log.Err(err.Error()) + // Save the plan + if err := p.planStorage.SavePlan(planFile, project, tfEnv, ctx.Pull.Num); err != nil { + ctx.Log.Err("saving plan: %s", err) + // there was an error planning so unlock if err := p.lockingClient.Unlock(lockAttempt.LockKey); err != nil { - ctx.Log.Err("error unlocking state: %v", err) + ctx.Log.Err("error unlocking: %v", err) } return PathResult{ - Status: "error", + Status: Error, Result: GeneralError{err}, } } + ctx.Log.Info("saved plan successfully") + // Delete local plan file - planFilePath := fmt.Sprintf("%s/%s", planOutDir, tfPlanName) - ctx.Log.Info("deleting local plan file %q", planFilePath) - if err := os.Remove(planFilePath); err != nil { - ctx.Log.Err("failed to delete local plan file %q", planFilePath, err) - // todo: return an error + if err := os.Remove(planFile); err != nil { + ctx.Log.Err("failed to delete local plan file %q: %s", planFile, err) + // don't return an error since it should still be fine } return PathResult{ - Status: "success", + Status: Success, Result: PlanSuccess{ TerraformOutput: output, LockURL: p.DeleteLockURL(lockAttempt.LockKey), @@ -357,13 +347,6 @@ func (p *PlanExecutor) isInExcludeList(fileName string) bool { return strings.Contains(fileName, "terraform.tfstate") || strings.Contains(fileName, "terraform.tfstate.backup") || strings.Contains(fileName, "_modules") || strings.Contains(fileName, "modules") } -func (p *PlanExecutor) trimSuffix(s, suffix string) string { - if strings.HasSuffix(s, suffix) { - s = s[:len(s)-len(suffix)] - } - return s -} - // ModifiedProjects returns the list of Terraform projects that have been changed due to the // modified files func (p *PlanExecutor) ModifiedProjects(repoFullName string, modifiedFiles []string) []models.Project { @@ -416,53 +399,15 @@ func (p *PlanExecutor) CleanWorkspace(log *logging.SimpleLogger, deleteFilesPref return nil } -func (p *PlanExecutor) DeleteLocalPlanFile(path string) error { - return os.Remove(path) -} - -// GenerateOutputFilename determines the name of the plan that will be stored in s3 -// if we're executing inside a sub directory, there will be a leading underscore -func (p *PlanExecutor) GenerateOutputFilename(project models.Project, tfEnvName string) string { - prefix := "" - if project.Path != "." { - // If not executing at repo root, need to encode the sub dir in the name of the output file. - // We do this by substituting / for _ - // We also add an _ because this gets appended to a larger path - // todo: refactor the path handling so it's all in one place - prefix = "_" + strings.Replace(project.Path, "/", "_", -1) - } - suffix := "" - if tfEnvName != "" { - suffix = "." 
+ tfEnvName - } - - return prefix + ".tfplan" + suffix -} - func generateStatePath(path string, tfEnvName string) string { return strings.Replace(path, "$ENVIRONMENT", tfEnvName, -1) } -func (p *PlanExecutor) updateGithubStatus(ctx *CommandContext, pathResults []PathResult) { - // the status will be the worst result - worstResult := p.worstResult(pathResults) - if worstResult == "success" { - p.github.UpdateStatus(ctx.Repo, ctx.Pull, SuccessStatus, "Plan Succeeded") - } else if worstResult == "failure" { - p.github.UpdateStatus(ctx.Repo, ctx.Pull, FailureStatus, "Plan Failed") - } else { - p.github.UpdateStatus(ctx.Repo, ctx.Pull, ErrorStatus, "Plan Error") - } -} - -func (p *PlanExecutor) worstResult(results []PathResult) string { - var worst string = "success" - for _, result := range results { - if result.Status == "error" { - return result.Status - } else if result.Status == "failure" { - worst = result.Status - } +func (a *PlanExecutor) updateGithubStatus(ctx *CommandContext, pathResults []PathResult) { + var statuses []Status + for _, p := range pathResults { + statuses = append(statuses, p.Status) } - return worst + worst := WorstStatus(statuses) + a.github.UpdateStatus(ctx.Repo, ctx.Pull, worst, "Plan "+worst.String()) } diff --git a/server/plan_executor_test.go b/server/plan_executor_test.go index 47548c17b8..53b9589e1c 100644 --- a/server/plan_executor_test.go +++ b/server/plan_executor_test.go @@ -1,7 +1,6 @@ package server import ( - "github.com/hootsuite/atlantis/models" . "github.com/hootsuite/atlantis/testing_util" "testing" ) @@ -26,16 +25,3 @@ func runTest(t *testing.T, testDescrip string, filesChanged []string, expectedPa Equals(t, expectedPaths[i], p.Path) } } - -func TestGenerateOutputFilename(t *testing.T) { - runTestGetOutputFilename(t, "should handle root", ".", "env", ".tfplan.env") - runTestGetOutputFilename(t, "should handle empty environment", ".", "", ".tfplan") - runTestGetOutputFilename(t, "should prepend underscore on relative paths", "a/b", "", "_a_b.tfplan") - runTestGetOutputFilename(t, "should prepend underscore on relative paths and env", "a/b", "env", "_a_b.tfplan.env") -} - -func runTestGetOutputFilename(t *testing.T, testDescrip string, path string, env string, expected string) { - t.Log(testDescrip) - outputFileName := p.GenerateOutputFilename(models.NewProject("owner/repo", path), env) - Equals(t, expected, outputFileName) -} diff --git a/server/s3_client.go b/server/s3_client.go deleted file mode 100644 index a1222cc5b4..0000000000 --- a/server/s3_client.go +++ /dev/null @@ -1,88 +0,0 @@ -package server - -import ( - "bytes" - "fmt" - "net/http" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" -) - -type S3Client struct { - bucketName string - prefix string - client *s3.S3 - err error -} - -// Setup s3 info -func (s *S3Client) SetupS3(bucketName string, prefix string, client *s3.S3, err error) { - s.bucketName = bucketName - s.prefix = prefix - s.client = client - s.err = err -} - -// Get s3 info -func (s S3Client) GetS3Info() S3Client { - return s -} - -// Initialize s3 client -func NewS3Client(awsConfig *AWSConfig, bucketName string, prefix string) S3Client { - _s3 := S3Client{} - - awsSession, err := awsConfig.CreateAWSSession() - if err != nil { - _s3.err = err - return _s3.GetS3Info() - } - - // now use the assumed role to connect to s3 - s3Client := s3.New(awsSession) - - _s3.SetupS3(bucketName, prefix, s3Client, nil) - - return _s3.GetS3Info() -} - -// todo: make OO -func UploadPlanFile(s S3Client, key 
string, outputFilePath string) error { - file, err := os.Open(outputFilePath) - if err != nil { - return fmt.Errorf("failed to open file %q: %v", outputFilePath, err) - } - - defer file.Close() - - fileInfo, _ := file.Stat() - var size int64 = fileInfo.Size() - - buffer := make([]byte, size) - // read file content to buffer - file.Read(buffer) - fileBytes := bytes.NewReader(buffer) - fileType := http.DetectContentType(buffer) - - keyWithPrefix := fmt.Sprintf("%s/%s", s.prefix, key) - - // Create put object - params := &s3.PutObjectInput{ - Bucket: aws.String(s.bucketName), // required - Key: aws.String(keyWithPrefix), // required - Body: fileBytes, - ContentLength: aws.Int64(size), - ContentType: aws.String(fileType), - Metadata: map[string]*string{ - "Key": aws.String("MetadataValue"), //required //todo: I don't think this is required - }, - } - - _, err = s.client.PutObject(params) - if err != nil { - return fmt.Errorf("failed to upload plan file to S3: %v", err) - } - return nil -} diff --git a/server/server.go b/server/server.go index 93345b260d..57eeff014b 100644 --- a/server/server.go +++ b/server/server.go @@ -2,7 +2,6 @@ package server import ( "context" - "encoding/json" "fmt" "log" "net/http" @@ -10,9 +9,9 @@ import ( "os" "strings" - "io/ioutil" "time" + "github.com/aws/aws-sdk-go/aws/session" "github.com/elazarl/go-bindata-assetfs" "github.com/google/go-github/github" "github.com/gorilla/mux" @@ -22,35 +21,32 @@ import ( "github.com/hootsuite/atlantis/logging" "github.com/hootsuite/atlantis/middleware" "github.com/hootsuite/atlantis/models" - "github.com/hootsuite/atlantis/recovery" + "github.com/hootsuite/atlantis/plan" + "github.com/hootsuite/atlantis/plan/file" + "github.com/hootsuite/atlantis/plan/s3" "github.com/pkg/errors" "github.com/urfave/cli" "github.com/urfave/negroni" + "io/ioutil" ) const ( deleteLockRoute = "delete-lock" LockingFileBackend = "file" LockingDynamoDBBackend = "dynamodb" + PlanFileBackend = "file" + PlanS3Backend = "s3" ) // Server listens for GitHub events and runs the necessary Atlantis command type Server struct { - router *mux.Router - port int - scratchDir string - awsRegion string - s3Bucket string - githubBaseClient *github.Client - githubClient *GithubClient - applyExecutor *ApplyExecutor - planExecutor *PlanExecutor - helpExecutor *HelpExecutor - logger *logging.SimpleLogger - githubComments *GithubCommentRenderer - requestParser *RequestParser - lockingClient *locking.Client - atlantisURL string + router *mux.Router + port int + commandHandler *CommandHandler + logger *logging.SimpleLogger + eventParser *EventParser + lockingClient *locking.Client + atlantisURL string } // the mapstructure tags correspond to flags in cmd/server.go @@ -66,8 +62,10 @@ type ServerConfig struct { LockingDynamoDBTable string `mapstructure:"locking-dynamodb-table"` LogLevel string `mapstructure:"log-level"` Port int `mapstructure:"port"` + PlanS3Bucket string `mapstructure:"plan-s3-bucket"` + PlanS3Prefix string `mapstructure:"plan-s3-prefix"` + PlanBackend string `mapstructure:"plan-backend"` RequireApproval bool `mapstructure:"require-approval"` - S3Bucket string `mapstructure:"s3-bucket"` SSHKey string `mapstructure:"ssh-key"` ScratchDir string `mapstructure:"scratch-dir"` } @@ -80,6 +78,7 @@ type CommandContext struct { Log *logging.SimpleLogger } +// todo: These structs have nothing to do with the server. 
Move to a different file/package #refactor type ExecutionResult struct { SetupError Templater SetupFailure Templater @@ -89,7 +88,7 @@ type ExecutionResult struct { type PathResult struct { Path string - Status string // todo: this should be an enum for success/error/failure + Status Status Result Templater } @@ -105,6 +104,8 @@ func (g GeneralError) Template() *CompiledTemplate { return GeneralErrorTmpl } +// todo: /end + func NewServer(config ServerConfig) (*Server, error) { tp := github.BasicAuthTransport{ Username: strings.TrimSpace(config.GitHubUser), @@ -126,13 +127,16 @@ func NewServer(config ServerConfig) (*Server, error) { AWSRegion: config.AWSRegion, AWSRoleArn: config.AssumeRole, } + + var awsSession *session.Session var lockingClient *locking.Client + var err error if config.LockingBackend == LockingDynamoDBBackend { - session, err := awsConfig.CreateAWSSession() + awsSession, err = awsConfig.CreateAWSSession() if err != nil { return nil, errors.Wrap(err, "creating aws session for DynamoDB") } - lockingClient = locking.NewClient(dynamodb.New(config.LockingDynamoDBTable, session)) + lockingClient = locking.NewClient(dynamodb.New(config.LockingDynamoDBTable, awsSession)) } else { backend, err := boltdb.New(config.DataDir) if err != nil { @@ -140,46 +144,62 @@ func NewServer(config ServerConfig) (*Server, error) { } lockingClient = locking.NewClient(backend) } + var planBackend plan.Backend + if config.PlanBackend == PlanS3Backend { + if awsSession == nil { + awsSession, err = awsConfig.CreateAWSSession() + if err != nil { + return nil, errors.Wrap(err, "creating aws session for S3") + } + } + planBackend = s3.New(awsSession, config.PlanS3Bucket, config.PlanS3Prefix) + } else { + planBackend, err = file.New(config.DataDir) + if err != nil { + return nil, errors.Wrap(err, "creating file backend for plans") + } + } applyExecutor := &ApplyExecutor{ github: githubClient, awsConfig: awsConfig, scratchDir: config.ScratchDir, - s3Bucket: config.S3Bucket, sshKey: config.SSHKey, terraform: terraformClient, githubCommentRenderer: githubComments, lockingClient: lockingClient, requireApproval: config.RequireApproval, + planStorage: planBackend, } planExecutor := &PlanExecutor{ github: githubClient, awsConfig: awsConfig, scratchDir: config.ScratchDir, - s3Bucket: config.S3Bucket, sshKey: config.SSHKey, terraform: terraformClient, githubCommentRenderer: githubComments, lockingClient: lockingClient, + planStorage: planBackend, } helpExecutor := &HelpExecutor{} logger := logging.NewSimpleLogger("server", log.New(os.Stderr, "", log.LstdFlags), false, logging.ToLogLevel(config.LogLevel)) + eventParser := &EventParser{} + commandHandler := &CommandHandler{ + applyExecutor: applyExecutor, + planExecutor: planExecutor, + helpExecutor: helpExecutor, + eventParser: eventParser, + githubClient: githubClient, + logger: logger, + } router := mux.NewRouter() return &Server{ - router: router, - port: config.Port, - scratchDir: config.ScratchDir, - awsRegion: config.AWSRegion, - s3Bucket: config.S3Bucket, - applyExecutor: applyExecutor, - planExecutor: planExecutor, - helpExecutor: helpExecutor, - githubBaseClient: githubBaseClient, - githubClient: githubClient, - logger: logger, - githubComments: githubComments, - requestParser: &RequestParser{}, - lockingClient: lockingClient, - atlantisURL: config.AtlantisURL, + router: router, + port: config.Port, + commandHandler: commandHandler, + eventParser: eventParser, + logger: logger, + lockingClient: lockingClient, + atlantisURL: config.AtlantisURL, }, nil } @@ 
-198,11 +218,11 @@ func (s *Server) Start() error { // function that planExecutor can use to construct delete lock urls // injecting this here because this is the earliest routes are created - s.planExecutor.DeleteLockURL = func(lockID string) string { + s.commandHandler.SetDeleteLockURL(func(lockID string) string { // ignoring error since guaranteed to succeed if "id" is specified u, _ := deleteLockRoute.URL("id", url.QueryEscape(lockID)) return s.atlantisURL + u.RequestURI() - } + }) n := negroni.New(&negroni.Recovery{ Logger: log.New(os.Stdout, "", log.LstdFlags), PrintStack: false, @@ -264,33 +284,53 @@ func (s *Server) deleteLock(w http.ResponseWriter, r *http.Request) { func (s *Server) postHooks(w http.ResponseWriter, r *http.Request) { githubReqID := "X-Github-Delivery=" + r.Header.Get("X-Github-Delivery") - defer r.Body.Close() - bytes, err := ioutil.ReadAll(r.Body) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "could not read body: %s\n", err) - return - } + var payload []byte - // Try to unmarshal the request into the supported event types - var commentEvent github.IssueCommentEvent - var pullEvent github.PullRequestEvent - if json.Unmarshal(bytes, &commentEvent) == nil && s.isCommentCreatedEvent(commentEvent) { - s.logger.Debug("Handling comment event %s", githubReqID) - s.handleCommentCreatedEvent(w, commentEvent, githubReqID) - } else if json.Unmarshal(bytes, &pullEvent) == nil && s.isPullClosedEvent(pullEvent) { - s.logger.Debug("Handling pull request event %s", githubReqID) - s.handlePullClosedEvent(w, pullEvent, githubReqID) + // webhook requests can either be application/json or application/x-www-form-urlencoded + if r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" { + // GitHub stores the json payload as a form value + payloadForm := r.PostFormValue("payload") + if payloadForm == "" { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprint(w, "request did not contain expected 'payload' form value") + return + + } + payload = []byte(payloadForm) } else { + // else read it as json + defer r.Body.Close() + var err error + payload, err = ioutil.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "could not read body: %s", err) + return + } + } + + event, _ := github.ParseWebHook(github.WebHookType(r), payload) + switch event := event.(type) { + case *github.IssueCommentEvent: + s.handleCommentEvent(w, event, githubReqID) + case *github.PullRequestEvent: + s.handlePullRequestEvent(w, event, githubReqID) + default: s.logger.Debug("Ignoring unsupported event %s", githubReqID) fmt.Fprintln(w, "Ignoring") } } -// handlePullClosedEvent will delete any locks associated with the pull request -func (s *Server) handlePullClosedEvent(w http.ResponseWriter, pullEvent github.PullRequestEvent, githubReqID string) { - repo := *pullEvent.Repo.FullName - pullNum := *pullEvent.PullRequest.Number +// handlePullRequestEvent will delete any locks associated with the pull request +func (s *Server) handlePullRequestEvent(w http.ResponseWriter, pullEvent *github.PullRequestEvent, githubReqID string) { + if pullEvent.GetAction() != "closed" { + s.logger.Debug("Ignoring pull request event since action was not closed %s", githubReqID) + fmt.Fprintln(w, "Ignoring") + return + } + repo := pullEvent.Repo.GetFullName() + pullNum := pullEvent.PullRequest.GetNumber() + s.logger.Debug("Unlocking locks for repo %s and pull %d %s", repo, pullNum, githubReqID) err := s.lockingClient.UnlockByPull(repo, pullNum) if err != nil { @@ 
-302,10 +342,16 @@ func (s *Server) handlePullClosedEvent(w http.ResponseWriter, pullEvent github.P fmt.Fprintln(w, "Locks unlocked") } -func (s *Server) handleCommentCreatedEvent(w http.ResponseWriter, comment github.IssueCommentEvent, githubReqID string) { +func (s *Server) handleCommentEvent(w http.ResponseWriter, event *github.IssueCommentEvent, githubReqID string) { + if event.GetAction() != "created" { + s.logger.Debug("Ignoring comment event since action was not created %s", githubReqID) + fmt.Fprintln(w, "Ignoring") + return + } + // determine if the comment matches a plan or apply command ctx := &CommandContext{} - command, err := s.requestParser.DetermineCommand(&comment) + command, err := s.eventParser.DetermineCommand(event) if err != nil { s.logger.Debug("Ignoring request: %v %s", err, githubReqID) fmt.Fprintln(w, "Ignoring") @@ -313,58 +359,12 @@ func (s *Server) handleCommentCreatedEvent(w http.ResponseWriter, comment github } ctx.Command = command - if err = s.requestParser.ExtractCommentData(&comment, ctx); err != nil { + if err = s.eventParser.ExtractCommentData(event, ctx); err != nil { s.logger.Err("Failed parsing event: %v %s", err, githubReqID) fmt.Fprintln(w, "Ignoring") return } // respond with success and then actually execute the command asynchronously fmt.Fprintln(w, "Processing...") - go s.executeCommand(ctx) -} - -func (s *Server) executeCommand(ctx *CommandContext) { - src := fmt.Sprintf("%s/pull/%d", ctx.Repo.FullName, ctx.Pull.Num) - // it's safe to reuse the underlying logger s.logger.Log - ctx.Log = logging.NewSimpleLogger(src, s.logger.Log, true, s.logger.Level) - defer s.recover(ctx) - - // we've got data from the comment, now we need to get data from the actual PR - pull, _, err := s.githubClient.GetPullRequest(ctx.Repo, ctx.Pull.Num) - if err != nil { - ctx.Log.Err("pull request data api call failed: %v", err) - return - } - if err := s.requestParser.ExtractPullData(pull, ctx); err != nil { - ctx.Log.Err("failed to extract required fields from comment data: %v", err) - return - } - - switch ctx.Command.commandType { - case Plan: - s.planExecutor.execute(ctx, s.githubClient) - case Apply: - s.applyExecutor.execute(ctx, s.githubClient) - case Help: - s.helpExecutor.execute(ctx, s.githubClient) - default: - ctx.Log.Err("failed to determine desired command, neither plan nor apply") - } -} - -func (s *Server) isCommentCreatedEvent(event github.IssueCommentEvent) bool { - return event.Action != nil && *event.Action == "created" && event.Comment != nil -} - -func (s *Server) isPullClosedEvent(event github.PullRequestEvent) bool { - return event.Action != nil && *event.Action == "closed" && event.PullRequest != nil -} - -// recover logs and creates a comment on the pull request for panics -func (s *Server) recover(ctx *CommandContext) { - if err := recover(); err != nil { - stack := recovery.Stack(3) - s.githubClient.CreateComment(ctx, fmt.Sprintf("**Error: goroutine panic. 
This is a bug.**\n```\n%s\n%s```", err, stack)) - ctx.Log.Err("PANIC: %s\n%s", err, stack) - } + go s.commandHandler.ExecuteCommand(ctx) } diff --git a/server/terraform_client.go b/server/terraform_client.go index ee01d1b35c..42cf3c0571 100644 --- a/server/terraform_client.go +++ b/server/terraform_client.go @@ -67,7 +67,7 @@ func (t *TerraformClient) ConfigureRemoteState(log *logging.SimpleLogger, repoDi // Get remote state path from setup.sh output r, _ := regexp.Compile("REMOTE_STATE_PATH=([^ ]*.tfstate)") match := r.FindStringSubmatch(output) - // Store remote state path + // Store the remote state path returned by setup.sh remoteStatePath := match[1] log.Info("remote state path %q", remoteStatePath)
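
Note for reviewers (not part of the patch): the Go sketch below illustrates how the new plan.Backend abstraction from plan/backend.go is selected and exercised, mirroring the wiring in server.NewServer and the plan/apply executors above. It only uses types and constructors introduced in this diff; the repo name, paths, pull number, and the default AWS session (rather than the AssumeRole-based session used in server.go) are placeholder assumptions for illustration.

// Sketch: selecting and using the plan.Backend introduced in this change.
// Assumes the plan, plan/file, and plan/s3 packages above; all values are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/hootsuite/atlantis/models"
	"github.com/hootsuite/atlantis/plan"
	"github.com/hootsuite/atlantis/plan/file"
	"github.com/hootsuite/atlantis/plan/s3"
)

// newPlanBackend mirrors the selection logic in NewServer: the "s3" backend
// needs an AWS session plus bucket/prefix, anything else falls back to the
// file backend rooted at the data directory.
func newPlanBackend(backendType, dataDir, bucket, prefix string) (plan.Backend, error) {
	if backendType == "s3" {
		sess := session.Must(session.NewSession())
		return s3.New(sess, bucket, prefix), nil
	}
	return file.New(dataDir)
}

func main() {
	backend, err := newPlanBackend("file", "/tmp/atlantis", "atlantis", "")
	if err != nil {
		log.Fatalf("creating plan backend: %s", err)
	}

	// hypothetical project and pull request, used only for illustration
	project := models.Project{RepoFullName: "hootsuite/atlantis", Path: "terraform/staging"}
	pullNum := 42

	// After `terraform plan -out ...`, the plan executor saves the plan file
	// keyed by repo, pull number, project path, and environment.
	if err := backend.SavePlan("/tmp/staging.tfplan", project, "staging", pullNum); err != nil {
		log.Fatalf("saving plan: %s", err)
	}

	// On apply, the apply executor copies every saved plan for this pull and
	// environment back into the checked-out repo and applies each one.
	plans, err := backend.CopyPlans("/tmp/workspace/hootsuite/atlantis/42", project.RepoFullName, "staging", pullNum)
	if err != nil {
		log.Fatalf("copying plans: %s", err)
	}
	for _, p := range plans {
		log.Printf("would apply %s for project path %s", p.LocalPath, p.Project.Path)
	}
}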