Merge pull request #9761 from hashicorp/b-9758-enforce-policy-on-scale
In Job.Scale, ensure that the new count is within the [min, max] bounds configured in the scaling policy.
cgbaker authored Jan 8, 2021
2 parents cf4534c + cb1c018 commit d234000
Showing 4 changed files with 59 additions and 1 deletion.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -14,6 +14,7 @@ BUG FIXES:
* consul/connect: Fixed a bug where in-place upgrade of Nomad client running Connect enabled jobs would panic [[GH-9738](https://github.com/hashicorp/nomad/issues/9738)]
* template: Fixed multiple issues in template src/dest and artifact dest interpolation [[GH-9671](https://github.com/hashicorp/nomad/issues/9671)]
* template: Fixed a bug where dynamic secrets did not trigger the template `change_mode` after a client restart. [[GH-9636](https://github.com/hashicorp/nomad/issues/9636)]
* scaling: Fixed a bug where job scaling endpoint did not enforce scaling policy min/max [[GH-9761](https://github.com/hashicorp/nomad/issues/9761)]
* server: Fixed a bug where new servers may bootstrap prematurely when configured with `bootstrap_expect = 0`.

## 1.0.1 (December 16, 2020)
2 changes: 1 addition & 1 deletion api/util_test.go
@@ -53,7 +53,7 @@ func testJobWithScalingPolicy() *Job {
	job.TaskGroups[0].Scaling = &ScalingPolicy{
		Policy:  map[string]interface{}{},
		Min:     int64ToPtr(1),
-		Max:     int64ToPtr(1),
+		Max:     int64ToPtr(5),
		Enabled: boolToPtr(true),
	}
	return job
14 changes: 14 additions & 0 deletions nomad/job_endpoint.go
@@ -1039,6 +1039,20 @@ func (j *Job) Scale(args *structs.JobScaleRequest, reply *structs.JobRegisterRes
	prevCount := found.Count
	if args.Count != nil {

		// if there is a scaling policy, check that the new count is within bounds
		if found.Scaling != nil {
			if *args.Count < found.Scaling.Min {
				return structs.NewErrRPCCoded(400,
					fmt.Sprintf("group count was less than scaling policy minimum: %d < %d",
						*args.Count, found.Scaling.Min))
			}
			if found.Scaling.Max < *args.Count {
				return structs.NewErrRPCCoded(400,
					fmt.Sprintf("group count was greater than scaling policy maximum: %d > %d",
						*args.Count, found.Scaling.Max))
			}
		}

		// Lookup the latest deployment, to see whether this scaling event should be blocked
		d, err := snap.LatestDeploymentByJobID(ws, namespace, args.JobID)
		if err != nil {
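
The enforcement added above is a plain bounds check against the group's scaling policy. Below is a minimal, standalone sketch of the same logic; the trimmed ScalingPolicy struct and the validateScaleCount helper are illustrative stand-ins for the real nomad/structs types, and plain errors replace structs.NewErrRPCCoded.

package main

import "fmt"

// ScalingPolicy is a simplified stand-in for the min/max fields of the real
// structs.ScalingPolicy used in the hunk above (trimmed for illustration).
type ScalingPolicy struct {
	Min int64
	Max int64
}

// validateScaleCount is a hypothetical helper mirroring the check Job.Scale now
// performs: no policy means no bounds, otherwise the requested count must fall
// within [Min, Max]. Plain errors stand in for structs.NewErrRPCCoded(400, ...).
func validateScaleCount(count int64, policy *ScalingPolicy) error {
	if policy == nil {
		return nil
	}
	if count < policy.Min {
		return fmt.Errorf("group count was less than scaling policy minimum: %d < %d", count, policy.Min)
	}
	if count > policy.Max {
		return fmt.Errorf("group count was greater than scaling policy maximum: %d > %d", count, policy.Max)
	}
	return nil
}

func main() {
	pol := &ScalingPolicy{Min: 3, Max: 10}
	fmt.Println(validateScaleCount(11, pol)) // rejected: 11 > 10
	fmt.Println(validateScaleCount(2, pol))  // rejected: 2 < 3
	fmt.Println(validateScaleCount(5, pol))  // <nil>: within bounds
}

A nil policy leaves the count unchecked, matching the guard on found.Scaling in the hunk above.
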
43 changes: 43 additions & 0 deletions nomad/job_endpoint_test.go
@@ -6705,6 +6705,49 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) {
	require.Contains(err.Error(), "should not contain count if error is true")
}

func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()

	job, pol := mock.JobWithScalingPolicy()
	pol.Min = 3
	pol.Max = 10
	job.TaskGroups[0].Count = 5

	// register the job
	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
	require.Nil(err)

	var resp structs.JobRegisterResponse
	scale := &structs.JobScaleRequest{
		JobID: job.ID,
		Target: map[string]string{
			structs.ScalingTargetGroup: job.TaskGroups[0].Name,
		},
		Count:          helper.Int64ToPtr(pol.Max + 1),
		Message:        "out of bounds",
		PolicyOverride: false,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
	require.Error(err)
	require.Contains(err.Error(), "group count was greater than scaling policy maximum: 11 > 10")

	scale.Count = helper.Int64ToPtr(2)
	err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
	require.Error(err)
	require.Contains(err.Error(), "group count was less than scaling policy minimum: 2 < 3")
}

func TestJobEndpoint_Scale_NoEval(t *testing.T) {
	t.Parallel()
	require := require.New(t)
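Because the CHANGELOG frames this as an endpoint-level fix, here is a rough client-side sketch of how an out-of-bounds request would surface over HTTP. It assumes a local Nomad agent on the default port, posts to the job scale endpoint (/v1/job/<job>/scale), and uses payload field names that mirror the JobScaleRequest in the test above; the body shape and the job ID "example" are assumptions for illustration, not a documented contract.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical scale request; field names mirror structs.JobScaleRequest above.
	body, _ := json.Marshal(map[string]interface{}{
		"Count":   11,
		"Target":  map[string]string{"Group": "web"},
		"Message": "out of bounds",
	})

	// Assumes a local Nomad agent on the default HTTP port; "example" is a placeholder job ID.
	resp, err := http.Post("http://127.0.0.1:4646/v1/job/example/scale",
		"application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// With this fix, a count outside the policy's [min, max] should come back
	// as a 400-coded error rather than being applied.
	msg, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(msg))
}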
