diff --git a/.changelog/11426.txt b/.changelog/11426.txt
new file mode 100644
index 00000000000..88557dfe1a1
--- /dev/null
+++ b/.changelog/11426.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+rpc: Set the job deregistration eval priority to the job priority
+```
diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go
index 531bc362b87..07d992467c7 100644
--- a/nomad/job_endpoint.go
+++ b/nomad/job_endpoint.go
@@ -829,12 +829,26 @@ func (j *Job) Deregister(args *structs.JobDeregisterRequest, reply *structs.JobD
 	// priority even if the job was.
 	now := time.Now().UnixNano()
 
+	// Set the default priority initially, but update it to the priority
+	// configured within the job if possible. It is reasonable from a user
+	// perspective that jobs with a higher priority have their deregister
+	// evaluated before those of a lower priority.
+	//
+	// Previously, the eval priority was always set to the default value.
+	// Jobs registered with a lower than default priority would therefore
+	// have had their deregister eval priority set higher than a user
+	// would expect.
+	priority := structs.JobDefaultPriority
+	if job != nil {
+		priority = job.Priority
+	}
+
 	// If the job is periodic or parameterized, we don't create an eval.
 	if job == nil || !(job.IsPeriodic() || job.IsParameterized()) {
 		eval = &structs.Evaluation{
 			ID:          uuid.Generate(),
 			Namespace:   args.RequestNamespace(),
-			Priority:    structs.JobDefaultPriority,
+			Priority:    priority,
 			Type:        structs.JobTypeService,
 			TriggeredBy: structs.EvalTriggerJobDeregister,
 			JobID:       args.JobID,
diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go
index 235882fe537..33bbdad78bf 100644
--- a/nomad/job_endpoint_test.go
+++ b/nomad/job_endpoint_test.go
@@ -3825,6 +3825,56 @@ func TestJobEndpoint_BatchDeregister_ACL(t *testing.T) {
 	require.NotEqual(validResp2.Index, 0)
 }
 
+func TestJobEndpoint_Deregister_Priority(t *testing.T) {
+	t.Parallel()
+	requireAssertion := require.New(t)
+
+	s1, cleanupS1 := TestServer(t, func(c *Config) {
+		c.NumSchedulers = 0 // Prevent automatic dequeue
+	})
+	defer cleanupS1()
+	codec := rpcClient(t, s1)
+	testutil.WaitForLeader(t, s1.RPC)
+	fsmState := s1.fsm.State()
+
+	// Create a job with a custom priority and register it.
+	job := mock.Job()
+	job.Priority = 90
+	err := fsmState.UpsertJob(structs.MsgTypeTestSetup, 100, job)
+	requireAssertion.Nil(err)
+
+	// Deregister and purge the job.
+	dereg := &structs.JobDeregisterRequest{
+		JobID: job.ID,
+		Purge: true,
+		WriteRequest: structs.WriteRequest{
+			Region:    "global",
+			Namespace: job.Namespace,
+		},
+	}
+	var resp structs.JobDeregisterResponse
+	requireAssertion.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp))
+	requireAssertion.NotZero(resp.Index)
+
+	// Check for the job in the FSM; it should not be there as it was purged.
+	out, err := fsmState.JobByID(nil, job.Namespace, job.ID)
+	requireAssertion.Nil(err)
+	requireAssertion.Nil(out)
+
+	// Look up the evaluation created by the deregistration.
+	eval, err := fsmState.EvalByID(nil, resp.EvalID)
+	requireAssertion.Nil(err)
+	requireAssertion.NotNil(eval)
+	requireAssertion.EqualValues(resp.EvalCreateIndex, eval.CreateIndex)
+	requireAssertion.Equal(job.Priority, eval.Priority)
+	requireAssertion.Equal(job.Type, eval.Type)
+	requireAssertion.Equal(structs.EvalTriggerJobDeregister, eval.TriggeredBy)
+	requireAssertion.Equal(job.ID, eval.JobID)
+	requireAssertion.Equal(structs.EvalStatusPending, eval.Status)
+	requireAssertion.NotZero(eval.CreateTime)
+	requireAssertion.NotZero(eval.ModifyTime)
+}
+
 func TestJobEndpoint_GetJob(t *testing.T) {
 	t.Parallel()
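
For reviewers, the core of the change distills to a single fallback rule: use the job's configured priority for the deregistration eval when the job can still be looked up, and fall back to the default otherwise (e.g. the job is already gone from state). Below is a minimal standalone sketch of that rule, not code from the patch itself; `job`, `deregisterEvalPriority`, and `jobDefaultPriority` are hypothetical stand-ins for the Nomad `structs` types and `structs.JobDefaultPriority`.

```go
package main

import "fmt"

// Stand-in for structs.JobDefaultPriority (50 in Nomad).
const jobDefaultPriority = 50

// Stand-in for the relevant field of structs.Job.
type job struct {
	Priority int
}

// deregisterEvalPriority mirrors the patched logic: prefer the job's
// own priority, and fall back to the default when the job is unknown.
func deregisterEvalPriority(j *job) int {
	if j != nil {
		return j.Priority
	}
	return jobDefaultPriority
}

func main() {
	fmt.Println(deregisterEvalPriority(&job{Priority: 90})) // prints 90
	fmt.Println(deregisterEvalPriority(nil))                // prints 50
}
```

The nil check matters because `Job.Deregister` still creates an eval when the job is absent from state, so the priority source cannot be assumed to exist.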