From 6e9f87c196de742c47f04a36c67772482e9b2383 Mon Sep 17 00:00:00 2001 From: robertdavidsmith <34475852+robertdavidsmith@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:21:37 +0000 Subject: [PATCH 01/12] Scheduler: better diagnostics for unknown preemption reason (#4055) * Scheduler: better diagnostics for unknown preemption reason Signed-off-by: Robert Smith * fix Signed-off-by: Robert Smith --------- Signed-off-by: Robert Smith --- internal/scheduler/internaltypes/node.go | 82 +++++++++++++------ internal/scheduler/internaltypes/node_test.go | 10 +++ .../testfixtures/testfixtures.go | 24 ++++++ internal/scheduler/nodedb/nodedb_test.go | 5 +- internal/scheduler/nodedb/nodeidindex_test.go | 1 + .../scheduler/nodedb/nodematching_test.go | 2 + internal/scheduler/scheduling/context/job.go | 19 +++-- .../scheduler/scheduling/context/job_test.go | 17 ++-- internal/scheduler/scheduling/eviction.go | 2 +- .../preempting_queue_scheduler_test.go | 1 + .../scheduling/preemption_description.go | 4 +- .../scheduling/preemption_description_test.go | 35 ++++---- 12 files changed, 145 insertions(+), 57 deletions(-) create mode 100644 internal/scheduler/internaltypes/testfixtures/testfixtures.go diff --git a/internal/scheduler/internaltypes/node.go b/internal/scheduler/internaltypes/node.go index d91ed8f52c0..c1280577a6d 100644 --- a/internal/scheduler/internaltypes/node.go +++ b/internal/scheduler/internaltypes/node.go @@ -1,6 +1,7 @@ package internaltypes import ( + "fmt" "math" "github.com/pkg/errors" @@ -44,6 +45,8 @@ type Node struct { // Total space allocatable on this node totalResources ResourceList + unallocatableResources map[int32]ResourceList + // This field is set when inserting the Node into a NodeDb. Keys [][]byte @@ -92,6 +95,11 @@ func FromSchedulerObjectsNode(node *schedulerobjects.Node, } allocatableByPriority[EvictedPriority] = allocatableByPriority[minimumPriority] + unallocatableResources := map[int32]ResourceList{} + for p, u := range node.UnallocatableResources { + unallocatableResources[p] = resourceListFactory.FromJobResourceListIgnoreUnknown(u.Resources) + } + return CreateNode( node.Id, nodeType, @@ -102,6 +110,7 @@ func FromSchedulerObjectsNode(node *schedulerobjects.Node, taints, labels, resourceListFactory.FromNodeProto(totalResources.Resources), + unallocatableResources, allocatableByPriority, map[string]ResourceList{}, map[string]ResourceList{}, @@ -119,6 +128,7 @@ func CreateNode( taints []v1.Taint, labels map[string]string, totalResources ResourceList, + unallocatableResources map[int32]ResourceList, allocatableByPriority map[int32]ResourceList, allocatedByQueue map[string]ResourceList, allocatedByJobId map[string]ResourceList, @@ -126,20 +136,21 @@ func CreateNode( keys [][]byte, ) *Node { return &Node{ - id: id, - nodeType: nodeType, - index: index, - executor: executor, - name: name, - pool: pool, - taints: koTaint.DeepCopyTaints(taints), - labels: deepCopyLabels(labels), - totalResources: totalResources, - AllocatableByPriority: maps.Clone(allocatableByPriority), - AllocatedByQueue: maps.Clone(allocatedByQueue), - AllocatedByJobId: maps.Clone(allocatedByJobId), - EvictedJobRunIds: evictedJobRunIds, - Keys: keys, + id: id, + nodeType: nodeType, + index: index, + executor: executor, + name: name, + pool: pool, + taints: koTaint.DeepCopyTaints(taints), + labels: deepCopyLabels(labels), + totalResources: totalResources, + unallocatableResources: maps.Clone(unallocatableResources), + AllocatableByPriority: maps.Clone(allocatableByPriority), + AllocatedByQueue: 
maps.Clone(allocatedByQueue), + AllocatedByJobId: maps.Clone(allocatedByJobId), + EvictedJobRunIds: evictedJobRunIds, + Keys: keys, } } @@ -204,18 +215,23 @@ func (node *Node) GetTotalResources() ResourceList { return node.totalResources } +func (node *Node) GetUnallocatableResources() map[int32]ResourceList { + return maps.Clone(node.unallocatableResources) +} + func (node *Node) DeepCopyNilKeys() *Node { return &Node{ // private fields are immutable so a shallow copy is fine - id: node.id, - index: node.index, - executor: node.executor, - name: node.name, - pool: node.pool, - nodeType: node.nodeType, - taints: node.taints, - labels: node.labels, - totalResources: node.totalResources, + id: node.id, + index: node.index, + executor: node.executor, + name: node.name, + pool: node.pool, + nodeType: node.nodeType, + taints: node.taints, + labels: node.labels, + totalResources: node.totalResources, + unallocatableResources: node.unallocatableResources, // keys set to nil Keys: nil, @@ -228,6 +244,26 @@ func (node *Node) DeepCopyNilKeys() *Node { } } +func (node *Node) SummaryString() string { + if node == nil { + return "" + } + + result := fmt.Sprintf("Id: %s\n", node.id) + result += fmt.Sprintf("Index: %d\n", node.index) + result += fmt.Sprintf("Executor: %s\n", node.executor) + result += fmt.Sprintf("Name: %s\n", node.name) + result += fmt.Sprintf("Pool: %s\n", node.pool) + result += fmt.Sprintf("TotalResources: %s\n", node.totalResources.String()) + result += fmt.Sprintf("Labels: %v\n", node.labels) + result += fmt.Sprintf("Taints: %v\n", node.taints) + for p, u := range node.unallocatableResources { + result += fmt.Sprintf("Unallocatable at %d: %s\n", p, u.String()) + } + + return result +} + func deepCopyLabels(labels map[string]string) map[string]string { result := make(map[string]string, len(labels)) for k, v := range labels { diff --git a/internal/scheduler/internaltypes/node_test.go b/internal/scheduler/internaltypes/node_test.go index 58a8d6c446c..15c9320f16a 100644 --- a/internal/scheduler/internaltypes/node_test.go +++ b/internal/scheduler/internaltypes/node_test.go @@ -40,6 +40,14 @@ func TestNode(t *testing.T) { "memory": resource.MustParse("32Gi"), }, ) + unallocatableResources := map[int32]ResourceList{ + 1: resourceListFactory.FromJobResourceListIgnoreUnknown( + map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + ), + } allocatableByPriority := map[int32]ResourceList{ 1: resourceListFactory.FromNodeProto( map[string]resource.Quantity{ @@ -103,6 +111,7 @@ func TestNode(t *testing.T) { taints, labels, totalResources, + unallocatableResources, allocatableByPriority, allocatedByQueue, allocatedByJobId, @@ -119,6 +128,7 @@ func TestNode(t *testing.T) { assert.Equal(t, taints, node.GetTaints()) assert.Equal(t, labels, node.GetLabels()) assert.Equal(t, totalResources, node.GetTotalResources()) + assert.Equal(t, unallocatableResources, node.GetUnallocatableResources()) assert.Equal(t, allocatableByPriority, node.AllocatableByPriority) assert.Equal(t, allocatedByQueue, node.AllocatedByQueue) assert.Equal(t, allocatedByJobId, node.AllocatedByJobId) diff --git a/internal/scheduler/internaltypes/testfixtures/testfixtures.go b/internal/scheduler/internaltypes/testfixtures/testfixtures.go new file mode 100644 index 00000000000..9c8414a747d --- /dev/null +++ b/internal/scheduler/internaltypes/testfixtures/testfixtures.go @@ -0,0 +1,24 @@ +package testfixtures + +import ( + 
"github.com/armadaproject/armada/internal/scheduler/internaltypes" +) + +func TestSimpleNode(id string) *internaltypes.Node { + return internaltypes.CreateNode( + id, + nil, + 0, + "", + "", + "", + nil, + nil, + internaltypes.ResourceList{}, + nil, + nil, + nil, + nil, + nil, + nil) +} diff --git a/internal/scheduler/nodedb/nodedb_test.go b/internal/scheduler/nodedb/nodedb_test.go index 3ace44245c4..d72f8d5cbd5 100644 --- a/internal/scheduler/nodedb/nodedb_test.go +++ b/internal/scheduler/nodedb/nodedb_test.go @@ -14,6 +14,7 @@ import ( "github.com/armadaproject/armada/internal/common/util" schedulerconfig "github.com/armadaproject/armada/internal/scheduler/configuration" "github.com/armadaproject/armada/internal/scheduler/internaltypes" + ittestfixtures "github.com/armadaproject/armada/internal/scheduler/internaltypes/testfixtures" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/scheduling/context" @@ -81,7 +82,7 @@ func TestSelectNodeForPod_NodeIdLabel_Success(t *testing.T) { jctxs := context.JobSchedulingContextsFromJobs(jobs) for _, jctx := range jctxs { txn := db.Txn(false) - jctx.SetAssignedNodeId(nodeId) + jctx.SetAssignedNode(ittestfixtures.TestSimpleNode(nodeId)) node, err := db.SelectNodeForJobWithTxn(txn, jctx) txn.Abort() require.NoError(t, err) @@ -106,7 +107,7 @@ func TestSelectNodeForPod_NodeIdLabel_Failure(t *testing.T) { jctxs := context.JobSchedulingContextsFromJobs(jobs) for _, jctx := range jctxs { txn := db.Txn(false) - jctx.SetAssignedNodeId("non-existent node") + jctx.SetAssignedNode(ittestfixtures.TestSimpleNode("non-existent node")) node, err := db.SelectNodeForJobWithTxn(txn, jctx) txn.Abort() if !assert.NoError(t, err) { diff --git a/internal/scheduler/nodedb/nodeidindex_test.go b/internal/scheduler/nodedb/nodeidindex_test.go index 024e19c130f..a36fb577633 100644 --- a/internal/scheduler/nodedb/nodeidindex_test.go +++ b/internal/scheduler/nodedb/nodeidindex_test.go @@ -56,6 +56,7 @@ func makeTestNode(id string) *internaltypes.Node { []v1.Taint{}, map[string]string{}, internaltypes.ResourceList{}, + nil, map[int32]internaltypes.ResourceList{}, map[string]internaltypes.ResourceList{}, map[string]internaltypes.ResourceList{}, diff --git a/internal/scheduler/nodedb/nodematching_test.go b/internal/scheduler/nodedb/nodematching_test.go index 9b1bdcfacff..0decf8d9fe4 100644 --- a/internal/scheduler/nodedb/nodematching_test.go +++ b/internal/scheduler/nodedb/nodematching_test.go @@ -662,6 +662,7 @@ func makeTestNodeTaintsLabels(taints []v1.Taint, labels map[string]string) *inte taints, labels, internaltypes.ResourceList{}, + nil, map[int32]internaltypes.ResourceList{}, map[string]internaltypes.ResourceList{}, map[string]internaltypes.ResourceList{}, @@ -685,6 +686,7 @@ func makeTestNodeResources(t *testing.T, allocatableByPriority map[int32]interna []v1.Taint{}, map[string]string{}, totalResources, + nil, allocatableByPriority, map[string]internaltypes.ResourceList{}, map[string]internaltypes.ResourceList{}, diff --git a/internal/scheduler/scheduling/context/job.go b/internal/scheduler/scheduling/context/job.go index e92167c3275..e9ad86ed71a 100644 --- a/internal/scheduler/scheduling/context/job.go +++ b/internal/scheduler/scheduling/context/job.go @@ -59,7 +59,7 @@ type JobSchedulingContext struct { GangInfo // This is the node the pod is assigned to. 
// This is only set for evicted jobs and is set alongside adding an additionalNodeSelector for the node - AssignedNodeId string + AssignedNode *internaltypes.Node // Id of job that preempted this pod PreemptingJobId string // Description of the cause of preemption @@ -109,14 +109,21 @@ func (jctx *JobSchedulingContext) Fail(unschedulableReason string) { } } +func (jctx *JobSchedulingContext) GetAssignedNode() *internaltypes.Node { + return jctx.AssignedNode +} + func (jctx *JobSchedulingContext) GetAssignedNodeId() string { - return jctx.AssignedNodeId + if jctx.AssignedNode == nil { + return "" + } + return jctx.AssignedNode.GetId() } -func (jctx *JobSchedulingContext) SetAssignedNodeId(assignedNodeId string) { - if assignedNodeId != "" { - jctx.AssignedNodeId = assignedNodeId - jctx.AddNodeSelector(schedulerconfig.NodeIdLabel, assignedNodeId) +func (jctx *JobSchedulingContext) SetAssignedNode(assignedNode *internaltypes.Node) { + if assignedNode != nil { + jctx.AssignedNode = assignedNode + jctx.AddNodeSelector(schedulerconfig.NodeIdLabel, assignedNode.GetId()) } } diff --git a/internal/scheduler/scheduling/context/job_test.go b/internal/scheduler/scheduling/context/job_test.go index 2d0ef0b0773..42999746039 100644 --- a/internal/scheduler/scheduling/context/job_test.go +++ b/internal/scheduler/scheduling/context/job_test.go @@ -6,21 +6,26 @@ import ( "github.com/stretchr/testify/assert" "github.com/armadaproject/armada/internal/scheduler/configuration" + ittestfixtures "github.com/armadaproject/armada/internal/scheduler/internaltypes/testfixtures" "github.com/armadaproject/armada/internal/scheduler/testfixtures" ) -func TestJobSchedulingContext_SetAssignedNodeId(t *testing.T) { +func TestJobSchedulingContext_SetAssignedNode(t *testing.T) { jctx := &JobSchedulingContext{} - assert.Equal(t, "", jctx.GetAssignedNodeId()) + assert.Nil(t, jctx.GetAssignedNode()) + assert.Empty(t, jctx.GetAssignedNodeId()) assert.Empty(t, jctx.AdditionalNodeSelectors) - // Will not add a node selector if input is empty - jctx.SetAssignedNodeId("") - assert.Equal(t, "", jctx.GetAssignedNodeId()) + // Will not add a node selector if input is nil + jctx.SetAssignedNode(nil) + assert.Nil(t, jctx.GetAssignedNode()) + assert.Empty(t, jctx.GetAssignedNodeId()) assert.Empty(t, jctx.AdditionalNodeSelectors) - jctx.SetAssignedNodeId("node1") + n := ittestfixtures.TestSimpleNode("node1") + jctx.SetAssignedNode(n) + assert.Equal(t, n, jctx.GetAssignedNode()) assert.Equal(t, "node1", jctx.GetAssignedNodeId()) assert.Len(t, jctx.AdditionalNodeSelectors, 1) assert.Equal(t, map[string]string{configuration.NodeIdLabel: "node1"}, jctx.AdditionalNodeSelectors) diff --git a/internal/scheduler/scheduling/eviction.go b/internal/scheduler/scheduling/eviction.go index eefbda16f2b..cdee7881bc9 100644 --- a/internal/scheduler/scheduling/eviction.go +++ b/internal/scheduler/scheduling/eviction.go @@ -192,7 +192,7 @@ func (evi *Evictor) Evict(ctx *armadacontext.Context, nodeDbTxn *memdb.Txn) (*Ev // TODO(albin): We can remove the checkOnlyDynamicRequirements flag in the nodeDb now that we've added the tolerations. jctx := schedulercontext.JobSchedulingContextFromJob(job) jctx.IsEvicted = true - jctx.SetAssignedNodeId(node.GetId()) + jctx.SetAssignedNode(node) evictedJctxsByJobId[job.Id()] = jctx jctx.AdditionalTolerations = append(jctx.AdditionalTolerations, node.GetTolerationsForTaints()...) 
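Taken together, the hunks above replace the bare AssignedNodeId string with the full *internaltypes.Node on the scheduling context, which is what lets PopulatePreemptionDescriptions (further below) embed a node summary in the unknown-preemption message. A minimal sketch of the new flow — not part of the patch — using the TestSimpleNode fixture introduced above; the unexported unknownPreemptionCause format string is inlined here:

package main

import (
	"fmt"

	ittestfixtures "github.com/armadaproject/armada/internal/scheduler/internaltypes/testfixtures"
	schedulercontext "github.com/armadaproject/armada/internal/scheduler/scheduling/context"
)

func main() {
	// Evicted jobs now carry the whole node on the context, not just an id.
	jctx := &schedulercontext.JobSchedulingContext{JobId: "job-1"}
	jctx.SetAssignedNode(ittestfixtures.TestSimpleNode("node-3"))

	// Existing callers that only need the id keep working.
	fmt.Println(jctx.GetAssignedNodeId()) // node-3

	// The unknown-preemption description can now include node diagnostics.
	fmt.Printf(
		"Preempted by scheduler due to the job failing to reschedule - possibly node resource changed causing this job to be unschedulable\nNode Summary:\n%s",
		jctx.GetAssignedNode().SummaryString(),
	)
}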
diff --git a/internal/scheduler/scheduling/preempting_queue_scheduler_test.go b/internal/scheduler/scheduling/preempting_queue_scheduler_test.go index f8fdbac4ce8..8aae7c57230 100644 --- a/internal/scheduler/scheduling/preempting_queue_scheduler_test.go +++ b/internal/scheduler/scheduling/preempting_queue_scheduler_test.go @@ -2507,6 +2507,7 @@ func testNodeWithTaints(node *internaltypes.Node, taints []v1.Taint) *internalty taints, node.GetLabels(), node.GetTotalResources(), + nil, node.AllocatableByPriority, node.AllocatedByQueue, node.AllocatedByJobId, diff --git a/internal/scheduler/scheduling/preemption_description.go b/internal/scheduler/scheduling/preemption_description.go index 3ae3e595e38..781cfcfe80f 100644 --- a/internal/scheduler/scheduling/preemption_description.go +++ b/internal/scheduler/scheduling/preemption_description.go @@ -10,7 +10,7 @@ import ( ) const ( - unknownPreemptionCause = "Preempted by scheduler due to the job failing to reschedule - possibly node resource changed causing this job to be unschedulable" + unknownPreemptionCause = "Preempted by scheduler due to the job failing to reschedule - possibly node resource changed causing this job to be unschedulable\nNode Summary:\n%s" unknownGangPreemptionCause = "Preempted by scheduler due to the job failing to reschedule - possibly another job in the gang was preempted or the node resource changed causing this job to be unschedulable" fairSharePreemptionTemplate = "Preempted by scheduler using fair share preemption - preempting job %s" urgencyPreemptionTemplate = "Preempted by scheduler using urgency preemption - preempting job %s" @@ -45,7 +45,7 @@ func PopulatePreemptionDescriptions(preemptedJobs []*context.JobSchedulingContex if isGang { preemptedJctx.PreemptionDescription = fmt.Sprintf(unknownGangPreemptionCause) } else { - preemptedJctx.PreemptionDescription = fmt.Sprintf(unknownPreemptionCause) + preemptedJctx.PreemptionDescription = fmt.Sprintf(unknownPreemptionCause, preemptedJctx.GetAssignedNode().SummaryString()) } } else if len(potentialPreemptingJobs) == 1 { preemptedJctx.PreemptionDescription = fmt.Sprintf(urgencyPreemptionTemplate, potentialPreemptingJobs[0].JobId) diff --git a/internal/scheduler/scheduling/preemption_description_test.go b/internal/scheduler/scheduling/preemption_description_test.go index 0d70f3dad0a..952f3d75441 100644 --- a/internal/scheduler/scheduling/preemption_description_test.go +++ b/internal/scheduler/scheduling/preemption_description_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + ittestfixtures "github.com/armadaproject/armada/internal/scheduler/internaltypes/testfixtures" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/armadaproject/armada/internal/scheduler/scheduling/context" @@ -38,61 +39,61 @@ func TestPopulatePreemptionDescriptions(t *testing.T) { }{ "unknown cause - basic job": { preemptedJobContext: &context.JobSchedulingContext{ - JobId: "job-1", - AssignedNodeId: "node-3", - Job: makeJob(t, "job-1", false), + JobId: "job-1", + AssignedNode: ittestfixtures.TestSimpleNode("node-3"), + Job: makeJob(t, "job-1", false), }, expectedPreemptedJobContext: &context.JobSchedulingContext{ JobId: "job-1", - AssignedNodeId: "node-3", + AssignedNode: ittestfixtures.TestSimpleNode("node-3"), Job: makeJob(t, "job-1", false), - PreemptionDescription: unknownPreemptionCause, + PreemptionDescription: 
fmt.Sprintf(unknownPreemptionCause, ittestfixtures.TestSimpleNode("node-3").SummaryString()), }, }, "unknown cause - gang job": { preemptedJobContext: &context.JobSchedulingContext{ - JobId: "job-1", - AssignedNodeId: "node-3", - Job: makeJob(t, "job-1", true), + JobId: "job-1", + AssignedNode: ittestfixtures.TestSimpleNode("node-3"), + Job: makeJob(t, "job-1", true), }, expectedPreemptedJobContext: &context.JobSchedulingContext{ JobId: "job-1", - AssignedNodeId: "node-3", + AssignedNode: ittestfixtures.TestSimpleNode("node-3"), Job: makeJob(t, "job-1", true), PreemptionDescription: unknownGangPreemptionCause, }, }, "urgency preemption - single preempting job": { preemptedJobContext: &context.JobSchedulingContext{ - JobId: "job-1", - AssignedNodeId: "node-1", + JobId: "job-1", + AssignedNode: ittestfixtures.TestSimpleNode("node-1"), }, expectedPreemptedJobContext: &context.JobSchedulingContext{ JobId: "job-1", - AssignedNodeId: "node-1", + AssignedNode: ittestfixtures.TestSimpleNode("node-1"), PreemptionDescription: fmt.Sprintf(urgencyPreemptionTemplate, "job-2"), }, }, "urgency preemption - multiple preempting jobs": { preemptedJobContext: &context.JobSchedulingContext{ - JobId: "job-1", - AssignedNodeId: "node-2", + JobId: "job-1", + AssignedNode: ittestfixtures.TestSimpleNode("node-2"), }, expectedPreemptedJobContext: &context.JobSchedulingContext{ JobId: "job-1", - AssignedNodeId: "node-2", + AssignedNode: ittestfixtures.TestSimpleNode("node-2"), PreemptionDescription: fmt.Sprintf(urgencyPreemptionMultiJobTemplate, "job-3,job-4"), }, }, "fairshare": { preemptedJobContext: &context.JobSchedulingContext{ JobId: "job-1", - AssignedNodeId: "node-4", + AssignedNode: ittestfixtures.TestSimpleNode("node-4"), PreemptingJobId: "job-7", }, expectedPreemptedJobContext: &context.JobSchedulingContext{ JobId: "job-1", - AssignedNodeId: "node-4", + AssignedNode: ittestfixtures.TestSimpleNode("node-4"), PreemptingJobId: "job-7", PreemptionDescription: fmt.Sprintf(fairSharePreemptionTemplate, "job-7"), }, From 2db637202a6f9c99d2c9f77a973b0092e4b138f1 Mon Sep 17 00:00:00 2001 From: Martynas Asipauskas Date: Mon, 25 Nov 2024 09:19:28 +0000 Subject: [PATCH 02/12] Add new query api methods (#284) (#4056) Co-authored-by: Martynas Asipauskas --- client/python/armada_client/asyncio_client.py | 33 ++++++++++ client/python/armada_client/client.py | 32 ++++++++++ client/python/pyproject.toml | 2 +- docs/python_armada_client.md | 61 +++++++++++++++++++ 4 files changed, 127 insertions(+), 1 deletion(-) diff --git a/client/python/armada_client/asyncio_client.py b/client/python/armada_client/asyncio_client.py index 301e7923445..6217ec9df3e 100644 --- a/client/python/armada_client/asyncio_client.py +++ b/client/python/armada_client/asyncio_client.py @@ -202,6 +202,39 @@ async def get_job_status(self, job_ids: List[str]) -> job_pb2.JobStatusResponse: resp = await self.job_stub.GetJobStatus(req) return resp + async def get_job_status_by_external_job_uri( + self, queue: str, job_set_id: str, external_job_uri: str + ) -> job_pb2.JobDetailsResponse: + """ + Retrieves the status of a job based on externalJobUri annotation. + + :param queue: The name of the queue + :param job_set_id: The name of the job set (a grouping of jobs) + :param external_job_uri: externalJobUri annotation value + + :returns: The response from the server containing the job status. 
+ :rtype: JobStatusResponse + """ + req = job_pb2.JobStatusUsingExternalJobUriRequest( + queue, job_set_id, external_job_uri + ) + resp = await self.job_stub.GetJobStatusUsingExternalJobUri(req) + return resp + + async def get_job_errors(self, job_ids: List[str]) -> job_pb2.JobErrorsResponse: + """ + Retrieves termination reason from query api. + + :param job_ids: A list of unique job identifiers. + :type job_ids: List[str] + + :returns: The response from the server containing the job errors. + :rtype: JobErrorsResponse + """ + req = job_pb2.JobErrorsRequest(job_ids=job_ids) + resp = await self.job_stub.GetJobErrors(req) + return resp + async def get_job_details(self, job_ids: List[str]) -> job_pb2.JobDetailsResponse: """ Asynchronously retrieves the details of a job from Armada. diff --git a/client/python/armada_client/client.py b/client/python/armada_client/client.py index 1cf36c7c2ed..efa0d71976c 100644 --- a/client/python/armada_client/client.py +++ b/client/python/armada_client/client.py @@ -177,6 +177,38 @@ def get_job_status(self, job_ids: List[str]) -> job_pb2.JobStatusResponse: req = job_pb2.JobStatusRequest(job_ids=job_ids) return self.job_stub.GetJobStatus(req) + def get_job_status_by_external_job_uri( + self, queue: str, job_set_id: str, external_job_uri: str + ) -> job_pb2.JobDetailsResponse: + """ + Retrieves the status of a job based on externalJobUri annotation. + + :param queue: The name of the queue + :param job_set_id: The name of the job set (a grouping of jobs) + :param external_job_uri: externalJobUri annotation value + + :returns: The response from the server containing the job status. + :rtype: JobStatusResponse + """ + req = job_pb2.JobStatusUsingExternalJobUriRequest( + queue, job_set_id, external_job_uri + ) + return self.job_stub.GetJobStatusUsingExternalJobUri(req) + + def get_job_errors(self, job_ids: List[str]) -> job_pb2.JobErrorsResponse: + """ + Retrieves termination reason from query api. + + :param queue: The name of the queue + :param job_set_id: The name of the job set (a grouping of jobs) + :param external_job_uri: externalJobUri annotation value + + :returns: The response from the server containing the job errors. + :rtype: JobErrorsResponse + """ + req = job_pb2.JobErrorsRequest(job_ids=job_ids) + return self.job_stub.GetJobErrors(req) + def get_job_details(self, job_ids: List[str]) -> job_pb2.JobDetailsResponse: """ Retrieves the details of a job from Armada. diff --git a/client/python/pyproject.toml b/client/python/pyproject.toml index 5b0952245f3..97ca4209e3e 100644 --- a/client/python/pyproject.toml +++ b/client/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "armada_client" -version = "0.4.6" +version = "0.4.7" description = "Armada gRPC API python client" readme = "README.md" requires-python = ">=3.7" diff --git a/docs/python_armada_client.md b/docs/python_armada_client.md index ce44f8741ab..44cc6ff9abd 100644 --- a/docs/python_armada_client.md +++ b/docs/python_armada_client.md @@ -277,6 +277,38 @@ Retrieves the details of a job from Armada. +#### get_job_errors(job_ids) +Retrieves termination reason from query api. + + +* **Parameters** + + + * **queue** – The name of the queue + + + * **job_set_id** – The name of the job set (a grouping of jobs) + + + * **external_job_uri** – externalJobUri annotation value + + + * **job_ids** (*List**[**str**]*) – + + + +* **Returns** + + The response from the server containing the job errors. 
+ + + +* **Return type** + + JobErrorsResponse + + + #### get_job_events_stream(queue, job_set_id, from_message_id=None) Get event stream for a job set. @@ -362,6 +394,35 @@ Retrieves the status of a list of jobs from Armada. +#### get_job_status_by_external_job_uri(queue, job_set_id, external_job_uri) +Retrieves the status of a job based on externalJobUri annotation. + + +* **Parameters** + + + * **queue** (*str*) – The name of the queue + + + * **job_set_id** (*str*) – The name of the job set (a grouping of jobs) + + + * **external_job_uri** (*str*) – externalJobUri annotation value + + + +* **Returns** + + The response from the server containing the job status. + + + +* **Return type** + + JobStatusResponse + + + #### get_queue(name) Get the queue by name. From 39ea0d3f0444a290e498e083c9f8fe62b9a0ed0b Mon Sep 17 00:00:00 2001 From: Chris Martin Date: Mon, 25 Nov 2024 15:39:59 +0000 Subject: [PATCH 03/12] Fix Error categorisation logic (#4054) Signed-off-by: Chris Martin --- internal/scheduler/metrics/state_metrics.go | 5 ++- .../scheduler/metrics/state_metrics_test.go | 40 +++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/internal/scheduler/metrics/state_metrics.go b/internal/scheduler/metrics/state_metrics.go index faa10d50901..4babfb81295 100644 --- a/internal/scheduler/metrics/state_metrics.go +++ b/internal/scheduler/metrics/state_metrics.go @@ -90,7 +90,7 @@ func newJobStateMetrics(errorRegexes []*regexp.Regexp, trackedResourceNames []v1 ) jobErrorsByNode := prometheus.NewCounterVec( prometheus.CounterOpts{ - Name: prefix + "error_classification_by_node", + Name: prefix + "job_error_classification_by_node", Help: "Failed jobs ey error classification at the node level", }, []string{nodeLabel, poolLabel, clusterLabel, errorCategoryLabel, errorSubcategoryLabel}, @@ -188,7 +188,8 @@ func (m *jobStateMetrics) ReportStateTransitions( m.completedRunDurations.WithLabelValues(job.Queue(), run.Pool()).Observe(duration) jobRunError := jobRunErrorsByRunId[run.Id()] category, subCategory := m.failedCategoryAndSubCategoryFromJob(jobRunError) - m.jobErrorsByQueue.WithLabelValues(job.Queue(), run.Executor(), category, subCategory).Inc() + m.jobErrorsByQueue.WithLabelValues(job.Queue(), run.Pool(), category, subCategory).Inc() + m.jobErrorsByNode.WithLabelValues(run.NodeName(), run.Pool(), run.Executor(), category, subCategory).Inc() } if jst.Succeeded { duration, priorState := stateDuration(job, run, run.TerminatedTime()) diff --git a/internal/scheduler/metrics/state_metrics_test.go b/internal/scheduler/metrics/state_metrics_test.go index aed4d510892..820d1fe41fe 100644 --- a/internal/scheduler/metrics/state_metrics_test.go +++ b/internal/scheduler/metrics/state_metrics_test.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" "github.com/armadaproject/armada/internal/scheduler/jobdb" @@ -343,6 +344,45 @@ func TestReportJobStateTransitions(t *testing.T) { } } +func TestCategoriseErrors(t *testing.T) { + run := baseRun. + WithExecutor(testCluster). + WithNodeName(testNode). 
+ WithPool(testPool) + + job := baseJob.WithUpdatedRun(run) + + r, err := regexp.Compile("generic pod error") + require.NoError(t, err) + + jobRunErrorsByRunId := map[string]*armadaevents.Error{ + run.Id(): { + Terminal: true, + Reason: &armadaevents.Error_PodError{ + PodError: &armadaevents.PodError{ + Message: "generic pod error", + }, + }, + }, + } + + jsts := []jobdb.JobStateTransitions{ + { + Job: job, + Failed: true, + }, + } + + metrics := newJobStateMetrics([]*regexp.Regexp{r}, []v1.ResourceName{"cpu"}, 12*time.Hour) + metrics.ReportStateTransitions(jsts, jobRunErrorsByRunId) + + actualjobErrorsByQueue := testutil.ToFloat64(metrics.jobErrorsByQueue.WithLabelValues(testQueue, testPool, "podError", "generic pod error")) + assert.InDelta(t, 1, actualjobErrorsByQueue, epsilon) + + actualjobErrorsByNode := testutil.ToFloat64(metrics.jobErrorsByNode.WithLabelValues(testNode, testPool, testCluster, "podError", "generic pod error")) + assert.InDelta(t, 1, actualjobErrorsByNode, epsilon) +} + func TestReset(t *testing.T) { byQueueLabels := []string{testQueue, testPool, "running", "pending"} byNodeLabels := []string{testNode, testPool, testCluster, "running", "pending"} From 7313fb818dce2d1eb4ac59a82d889b32a07ce2c0 Mon Sep 17 00:00:00 2001 From: Martynas Asipauskas Date: Mon, 25 Nov 2024 16:12:54 +0000 Subject: [PATCH 04/12] Airflow operator - raise structured exceptions and retrieve job termination reason Co-authored-by: Martynas Asipauskas --- third_party/airflow/armada/hooks.py | 12 +++++ .../airflow/armada/operators/armada.py | 19 +++++-- .../airflow/armada/operators/errors.py | 50 +++++++++++++++++++ third_party/airflow/pyproject.toml | 2 +- .../test/unit/operators/test_armada.py | 11 ++-- .../test/unit/operators/test_errors.py | 39 +++++++++++++++ 6 files changed, 122 insertions(+), 11 deletions(-) create mode 100644 third_party/airflow/armada/operators/errors.py create mode 100644 third_party/airflow/test/unit/operators/test_errors.py diff --git a/third_party/airflow/armada/hooks.py b/third_party/airflow/armada/hooks.py index c1ba6349b74..bf52a4cc6eb 100644 --- a/third_party/airflow/armada/hooks.py +++ b/third_party/airflow/armada/hooks.py @@ -92,6 +92,18 @@ def submit_job( return RunningJobContext(queue, job.job_id, job_set_id, DateTime.utcnow()) + @tenacity.retry( + wait=tenacity.wait_random_exponential(max=3), + stop=tenacity.stop_after_attempt(5), + reraise=True, + ) + @log_exceptions + def job_termination_reason(self, job_context: RunningJobContext) -> str: + resp = self.client.get_job_errors([job_context.job_id]) + job_error = resp.job_errors.get(job_context.job_id, "") + + return job_error or "" + @tenacity.retry( wait=tenacity.wait_random_exponential(max=3), stop=tenacity.stop_after_attempt(5), diff --git a/third_party/airflow/armada/operators/armada.py b/third_party/airflow/armada/operators/armada.py index 614de1ab8ff..8d38a65d260 100644 --- a/third_party/airflow/armada/operators/armada.py +++ b/third_party/airflow/armada/operators/armada.py @@ -26,7 +26,6 @@ import jinja2 import tenacity from airflow.configuration import conf -from airflow.exceptions import AirflowException from airflow.models import BaseOperator, BaseOperatorLink, XCom from airflow.models.taskinstancekey import TaskInstanceKey from airflow.serialization.serde import deserialize @@ -40,6 +39,7 @@ from google.protobuf.json_format import MessageToDict, ParseDict from pendulum import DateTime +from .errors import ArmadaOperatorJobFailedError from ..hooks import ArmadaHook from ..model import RunningJobContext 
from ..triggers import ArmadaPollJobTrigger @@ -349,9 +349,11 @@ def _running_job_terminated(self, context: RunningJobContext): f"job {context.job_id} terminated with state: {context.state.name}" ) if context.state != JobState.SUCCEEDED: - raise AirflowException( - f"job {context.job_id} did not succeed. " - f"Final status was {context.state.name}" + raise ArmadaOperatorJobFailedError( + context.armada_queue, + context.job_id, + context.state, + self.hook.job_termination_reason(context), ) def _not_acknowledged_within_timeout(self) -> bool: @@ -363,6 +365,13 @@ def _not_acknowledged_within_timeout(self) -> bool: return True return False + def _should_have_a_pod_in_k8s(self) -> bool: + return self.job_context.state in { + JobState.RUNNING, + JobState.FAILED, + JobState.SUCCEEDED, + } + @log_exceptions def _check_job_status_and_fetch_logs(self, context) -> None: self.job_context = self.hook.refresh_context( @@ -377,7 +386,7 @@ def _check_job_status_and_fetch_logs(self, context) -> None: self.job_context = self.hook.cancel_job(self.job_context) return - if self.job_context.cluster and self.container_logs: + if self._should_have_a_pod_in_k8s() and self.container_logs: try: last_log_time = self.pod_manager.fetch_container_logs( k8s_context=self.job_context.cluster, diff --git a/third_party/airflow/armada/operators/errors.py b/third_party/airflow/armada/operators/errors.py new file mode 100644 index 00000000000..bd50d8c84a8 --- /dev/null +++ b/third_party/airflow/armada/operators/errors.py @@ -0,0 +1,50 @@ +from airflow.exceptions import AirflowException + +from armada_client.typings import JobState + + +class ArmadaOperatorJobFailedError(AirflowException): + """ + Raised when an ArmadaOperator job has terminated unsuccessfully on Armada. + + :param job_id: The unique identifier of the job. + :type job_id: str + :param queue: The queue the job was submitted to. + :type queue: str + :param state: The termination state of the job. + :type state: TerminationState + :param reason: The termination reason, if provided. + :type reason: str + """ + + def __init__(self, queue: str, job_id: str, state: JobState, reason: str = ""): + self.job_id = job_id + self.queue = queue + self.state = state + self.reason = reason + self.message = self._generate_message() + super().__init__(self.message) + + def _generate_message(self) -> str: + """ + Generate a user-friendly error message. + + :return: Formatted error message with job details. + :rtype: str + """ + message = ( + f"ArmadaOperator job '{self.job_id}' in queue '{self.queue}'" + f" terminated with state '{self.state.name.capitalize()}'." + ) + if self.reason: + message += f" Termination reason: {self.reason}" + return message + + def __str__(self) -> str: + """ + Return the error message when the exception is converted to a string. + + :return: The error message. 
+ :rtype: str + """ + return self.message diff --git a/third_party/airflow/pyproject.toml b/third_party/airflow/pyproject.toml index a4ae0607679..67c7b91f678 100644 --- a/third_party/airflow/pyproject.toml +++ b/third_party/airflow/pyproject.toml @@ -10,7 +10,7 @@ readme='README.md' authors = [{name = "Armada-GROSS", email = "armada@armadaproject.io"}] license = { text = "Apache Software License" } dependencies=[ - 'armada-client>=0.4.6', + 'armada-client>=0.4.7', 'apache-airflow>=2.6.3', 'types-protobuf==4.24.0.1', 'kubernetes>=23.6.0', diff --git a/third_party/airflow/test/unit/operators/test_armada.py b/third_party/airflow/test/unit/operators/test_armada.py index 4e3f8804e36..c8b3272db66 100644 --- a/third_party/airflow/test/unit/operators/test_armada.py +++ b/third_party/airflow/test/unit/operators/test_armada.py @@ -4,9 +4,10 @@ from unittest.mock import MagicMock, patch import pytest -from airflow.exceptions import AirflowException, TaskDeferred +from airflow.exceptions import TaskDeferred from armada.model import GrpcChannelArgs, RunningJobContext from armada.operators.armada import ArmadaOperator +from armada.operators.errors import ArmadaOperatorJobFailedError from armada.triggers import ArmadaPollJobTrigger from armada_client.armada.submit_pb2 import JobSubmitRequestItem from armada_client.typings import JobState @@ -166,12 +167,12 @@ def test_execute_fail(terminal_state, context): for s in [JobState.RUNNING, terminal_state] ] - with pytest.raises(AirflowException) as exec_info: + with pytest.raises(ArmadaOperatorJobFailedError) as exec_info: op.execute(context) # Error message contain terminal state and job id assert DEFAULT_JOB_ID in str(exec_info) - assert terminal_state.name in str(exec_info) + assert terminal_state.name.capitalize() in str(exec_info) op.hook.submit_job.assert_called_once_with( DEFAULT_QUEUE, DEFAULT_JOB_SET, op.job_request @@ -199,12 +200,12 @@ def test_not_acknowledged_within_timeout_terminates_running_job(context): op = operator(JobSubmitRequestItem(), job_acknowledgement_timeout_s=-1) op.hook.refresh_context.return_value = job_context - with pytest.raises(AirflowException) as exec_info: + with pytest.raises(ArmadaOperatorJobFailedError) as exec_info: op.execute(context) # Error message contain terminal state and job id assert DEFAULT_JOB_ID in str(exec_info) - assert JobState.CANCELLED.name in str(exec_info) + assert JobState.CANCELLED.name.capitalize() in str(exec_info) # We also cancel already submitted job op.hook.cancel_job.assert_called_once_with(job_context) diff --git a/third_party/airflow/test/unit/operators/test_errors.py b/third_party/airflow/test/unit/operators/test_errors.py new file mode 100644 index 00000000000..e6b950b5996 --- /dev/null +++ b/third_party/airflow/test/unit/operators/test_errors.py @@ -0,0 +1,39 @@ +import pytest +from armada_client.typings import JobState +from armada.operators.errors import ArmadaOperatorJobFailedError + + +def test_constructor(): + job_id = "test-job" + queue = "default-queue" + state = JobState.FAILED + reason = "Out of memory" + + error = ArmadaOperatorJobFailedError(queue, job_id, state, reason) + + assert error.job_id == job_id + assert error.queue == queue + assert error.state == state + assert error.reason == reason + + +@pytest.mark.parametrize( + "reason,expected_message", + [ + ( + "", + "ArmadaOperator job 'test-job' in queue 'default-queue' terminated " + "with state 'Failed'.", + ), + ( + "Out of memory", + "ArmadaOperator job 'test-job' in queue 'default-queue' terminated " + "with state 
'Failed'. Termination reason: Out of memory", + ), + ], +) +def test_message(reason: str, expected_message: str): + error = ArmadaOperatorJobFailedError( + "default-queue", "test-job", JobState.FAILED, reason + ) + assert str(error) == expected_message From 594c4aa35e496faf708873aec79b045dc467378e Mon Sep 17 00:00:00 2001 From: Martynas Asipauskas Date: Tue, 26 Nov 2024 16:37:17 +0000 Subject: [PATCH 05/12] Airflow operator - reattach to running jobs on retries (disabled by default) --- client/python/armada_client/asyncio_client.py | 2 +- client/python/armada_client/client.py | 2 +- client/python/pyproject.toml | 2 +- third_party/airflow/armada/hooks.py | 62 +++++++++++++-- .../airflow/armada/operators/armada.py | 64 ++++++++++----- .../airflow/armada/policies/reattach.py | 54 +++++++++++++ third_party/airflow/armada/policies/retry.py | 0 third_party/airflow/armada/utils.py | 34 +++++++- .../test/unit/operators/test_armada.py | 4 +- .../test/unit/policies/test_reattach.py | 78 +++++++++++++++++++ 10 files changed, 270 insertions(+), 32 deletions(-) create mode 100644 third_party/airflow/armada/policies/reattach.py create mode 100644 third_party/airflow/armada/policies/retry.py create mode 100644 third_party/airflow/test/unit/policies/test_reattach.py diff --git a/client/python/armada_client/asyncio_client.py b/client/python/armada_client/asyncio_client.py index 6217ec9df3e..8b76379bedf 100644 --- a/client/python/armada_client/asyncio_client.py +++ b/client/python/armada_client/asyncio_client.py @@ -216,7 +216,7 @@ async def get_job_status_by_external_job_uri( :rtype: JobStatusResponse """ req = job_pb2.JobStatusUsingExternalJobUriRequest( - queue, job_set_id, external_job_uri + queue=queue, jobset=job_set_id, external_job_uri=external_job_uri ) resp = await self.job_stub.GetJobStatusUsingExternalJobUri(req) return resp diff --git a/client/python/armada_client/client.py b/client/python/armada_client/client.py index efa0d71976c..6cc96f81319 100644 --- a/client/python/armada_client/client.py +++ b/client/python/armada_client/client.py @@ -191,7 +191,7 @@ def get_job_status_by_external_job_uri( :rtype: JobStatusResponse """ req = job_pb2.JobStatusUsingExternalJobUriRequest( - queue, job_set_id, external_job_uri + queue=queue, jobset=job_set_id, external_job_uri=external_job_uri ) return self.job_stub.GetJobStatusUsingExternalJobUri(req) diff --git a/client/python/pyproject.toml b/client/python/pyproject.toml index 97ca4209e3e..c9f06f55ba7 100644 --- a/client/python/pyproject.toml +++ b/client/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "armada_client" -version = "0.4.7" +version = "0.4.8" description = "Armada gRPC API python client" readme = "README.md" requires-python = ">=3.7" diff --git a/third_party/airflow/armada/hooks.py b/third_party/airflow/armada/hooks.py index bf52a4cc6eb..425af511bc1 100644 --- a/third_party/airflow/armada/hooks.py +++ b/third_party/airflow/armada/hooks.py @@ -8,7 +8,7 @@ from airflow.models import TaskInstance from airflow.utils.log.logging_mixin import LoggingMixin from armada.model import GrpcChannelArgs -from armada_client.armada.job_pb2 import JobRunDetails +from armada_client.armada.job_pb2 import JobDetailsResponse, JobRunDetails from armada_client.armada.submit_pb2 import JobSubmitRequestItem from armada_client.client import ArmadaClient from armada_client.typings import JobState @@ -99,10 +99,55 @@ def submit_job( ) @log_exceptions def job_termination_reason(self, job_context: RunningJobContext) -> str: - resp = 
self.client.get_job_errors([job_context.job_id]) - job_error = resp.job_errors.get(job_context.job_id, "") + if job_context.state in { + JobState.REJECTED, + JobState.PREEMPTED, + JobState.FAILED, + }: + resp = self.client.get_job_errors([job_context.job_id]) + job_error = resp.job_errors.get(job_context.job_id, "") + return job_error or "" + return "" - return job_error or "" + @tenacity.retry( + wait=tenacity.wait_random_exponential(max=3), + stop=tenacity.stop_after_attempt(5), + reraise=True, + ) + @log_exceptions + def job_by_external_job_uri( + self, + armada_queue: str, + job_set: str, + external_job_uri: str, + ) -> RunningJobContext: + response = self.client.get_job_status_by_external_job_uri( + armada_queue, job_set, external_job_uri + ) + job_ids = list(response.job_states.keys()) + job_details = self.client.get_job_details(job_ids).job_details.values() + last_submitted = next( + iter( + sorted(job_details, key=lambda d: d.submitted_ts.seconds, reverse=True) + ), + None, + ) + if last_submitted: + cluster = None + latest_run = self._get_latest_job_run_details(last_submitted) + if latest_run: + cluster = latest_run.cluster + return RunningJobContext( + armada_queue, + last_submitted.job_id, + job_set, + DateTime.utcnow(), + last_log_time=None, + cluster=cluster, + job_state=JobState(last_submitted.state).name, + ) + + return None @tenacity.retry( wait=tenacity.wait_random_exponential(max=3), @@ -125,7 +170,9 @@ def refresh_context( if not cluster: # Job is running / or completed already if state == JobState.RUNNING or state.is_terminal(): - run_details = self._get_latest_job_run_details(job_context.job_id) + job_id = job_context.job_id + job_details = self.client.get_job_details([job_id]).job_details[job_id] + run_details = self._get_latest_job_run_details(job_details) if run_details: cluster = run_details.cluster return dataclasses.replace(job_context, job_state=state.name, cluster=cluster) @@ -167,8 +214,9 @@ def context_to_xcom( }, ) - def _get_latest_job_run_details(self, job_id) -> Optional[JobRunDetails]: - job_details = self.client.get_job_details([job_id]).job_details[job_id] + def _get_latest_job_run_details( + self, job_details: Optional[JobDetailsResponse] + ) -> Optional[JobRunDetails]: if job_details and job_details.latest_run_id: for run in job_details.job_runs: if run.run_id == job_details.latest_run_id: diff --git a/third_party/airflow/armada/operators/armada.py b/third_party/airflow/armada/operators/armada.py index 8d38a65d260..158a80cc479 100644 --- a/third_party/airflow/armada/operators/armada.py +++ b/third_party/airflow/armada/operators/armada.py @@ -26,7 +26,9 @@ import jinja2 import tenacity from airflow.configuration import conf +from airflow.exceptions import AirflowFailException from airflow.models import BaseOperator, BaseOperatorLink, XCom +from airflow.models.taskinstance import TaskInstance from airflow.models.taskinstancekey import TaskInstanceKey from airflow.serialization.serde import deserialize from airflow.utils.context import Context @@ -42,8 +44,9 @@ from .errors import ArmadaOperatorJobFailedError from ..hooks import ArmadaHook from ..model import RunningJobContext +from ..policies.reattach import external_job_uri, policy from ..triggers import ArmadaPollJobTrigger -from ..utils import log_exceptions, xcom_pull_for_ti +from ..utils import log_exceptions, xcom_pull_for_ti, resolve_parameter_value class LookoutLink(BaseOperatorLink): @@ -102,6 +105,8 @@ class ArmadaOperator(BaseOperator, LoggingMixin): :type job_acknowledgement_timeout: int 
:param dry_run: Run Operator in dry-run mode - render Armada request and terminate. :type dry_run: bool +:param reattach_policy: Operator reattach policy to use (defaults to: always) +:type reattach_policy: Optional[str] :param kwargs: Additional keyword arguments to pass to the BaseOperator. """ @@ -130,6 +135,7 @@ def __init__( dry_run: bool = conf.getboolean( "armada_operator", "default_dry_run", fallback=False ), + reattach_policy: Optional[str] = None, **kwargs, ) -> None: super().__init__(**kwargs) @@ -148,6 +154,15 @@ def __init__( self.dry_run = dry_run self.job_context = None + configured_reattach_policy: str = resolve_parameter_value( + "reattach_policy", reattach_policy, kwargs, "never" + ) + self.log.info( + f"Configured reattach policy to: '{configured_reattach_policy}'," + f" max retries: {self.retries}" + ) + self.reattach_policy = policy(configured_reattach_policy) + if self.container_logs and self.k8s_token_retriever is None: self.log.warning( "Token refresh mechanism not configured, airflow may stop retrieving " @@ -326,35 +341,52 @@ def _reattach_or_submit_job( def _try_reattach_to_running_job( self, context: Context ) -> Optional[RunningJobContext]: - # TODO: We should support re-attaching to currently running jobs. - # This is subject to re-attach policy / discovering jobs we already submitted. - # Issue - xcom state gets cleared before re-entry. - # ctx = self.hook.context_from_xcom(ti, re_attach=True) + # On first try we intentionally do not re-attach. + self.log.info(context) + if context["ti"].try_number == 1: + return None + + expected_job_uri = external_job_uri(context) + ctx = self.hook.job_by_external_job_uri( + self.armada_queue, self.job_set_id, expected_job_uri + ) - # if ctx: - # if ctx.state not in {JobState.FAILED, JobState.PREEMPTED}: + if ctx: + termination_reason = self.hook.job_termination_reason(ctx) + if self.reattach_policy(ctx.state, termination_reason): + return ctx + else: + self.log.info( + f"Found: job-id {ctx.job_id} in {ctx.state}. " + "Didn't reattach due to reattach policy." 
+ ) return None - def _poll_for_termination(self, context) -> None: + def _poll_for_termination(self, context: Context) -> None: while self.job_context.state.is_active(): self._check_job_status_and_fetch_logs(context) if self.job_context.state.is_active(): self._yield() - self._running_job_terminated(self.job_context) + self._running_job_terminated(context["ti"], self.job_context) - def _running_job_terminated(self, context: RunningJobContext): + def _running_job_terminated(self, ti: TaskInstance, context: RunningJobContext): self.log.info( f"job {context.job_id} terminated with state: {context.state.name}" ) if context.state != JobState.SUCCEEDED: - raise ArmadaOperatorJobFailedError( + error = ArmadaOperatorJobFailedError( context.armada_queue, context.job_id, context.state, self.hook.job_termination_reason(context), ) + if self.reattach_policy(error.state, error.reason): + self.log.error(str(error)) + raise AirflowFailException() + else: + raise error def _not_acknowledged_within_timeout(self) -> bool: if self.job_context.state == JobState.UNKNOWN: @@ -416,14 +448,6 @@ def _xcom_push(self, context, key: str, value: Any): task_instance = context["ti"] task_instance.xcom_push(key=key, value=value) - def _external_job_uri(self, context: Context) -> str: - task_id = context["ti"].task_id - map_index = context["ti"].map_index - run_id = context["run_id"] - dag_id = context["dag"].dag_id - - return f"airflow://{dag_id}/{task_id}/{run_id}/{map_index}" - def _annotate_job_request(self, context, request: JobSubmitRequestItem): if "ANNOTATION_KEY_PREFIX" in os.environ: annotation_key_prefix = f'{os.environ.get("ANNOTATION_KEY_PREFIX")}' @@ -438,5 +462,5 @@ def _annotate_job_request(self, context, request: JobSubmitRequestItem): request.annotations[annotation_key_prefix + "taskRunId"] = run_id request.annotations[annotation_key_prefix + "dagId"] = dag_id request.annotations[annotation_key_prefix + "externalJobUri"] = ( - self._external_job_uri(context) + external_job_uri(context) ) diff --git a/third_party/airflow/armada/policies/reattach.py b/third_party/airflow/armada/policies/reattach.py new file mode 100644 index 00000000000..789b1d26b57 --- /dev/null +++ b/third_party/airflow/armada/policies/reattach.py @@ -0,0 +1,54 @@ +from typing import Literal +from airflow.utils.context import Context + +from armada_client.typings import JobState + + +def external_job_uri(context: Context) -> str: + task_id = context["ti"].task_id + map_index = context["ti"].map_index + run_id = context["run_id"] + dag_id = context["dag"].dag_id + + return f"airflow://{dag_id}/{task_id}/{run_id}/{map_index}" + + +def policy(policy_type: Literal["always", "never", "running_or_succeeded"]) -> callable: + """ + Returns the corresponding re-attach policy function based on the policy type. + + :param policy_type: The type of policy ('always', 'never', 'running_or_succeeded'). + :type policy_type: Literal['always', 'never', 'running_or_succeeded'] + :return: A function that determines whether to re-attach to an existing job. + :rtype: Callable[[JobState, str], bool] + """ + policy_type = policy_type.lower() + if policy_type == "always": + return always_reattach + elif policy_type == "never": + return never_reattach + elif policy_type == "running_or_succeeded": + return running_or_succeeded_reattach + else: + raise ValueError(f"Unknown policy type: {policy_type}") + + +def never_reattach(state: JobState, termination_reason: str) -> bool: + """ + Policy that never allows re-attaching a job. 
+ """ + return False + + +def always_reattach(state: JobState, termination_reason: str) -> bool: + """ + Policy that always re-attaches to a job. + """ + return True + + +def running_or_succeeded_reattach(state: JobState, termination_reason: str) -> bool: + """ + Policy that allows re-attaching as long as it hasn't failed. + """ + return state not in {JobState.FAILED, JobState.REJECTED} diff --git a/third_party/airflow/armada/policies/retry.py b/third_party/airflow/armada/policies/retry.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/third_party/airflow/armada/utils.py b/third_party/airflow/armada/utils.py index 46f551e4ac4..198bc5e8350 100644 --- a/third_party/airflow/armada/utils.py +++ b/third_party/airflow/armada/utils.py @@ -1,7 +1,8 @@ import functools -from typing import Any +from typing import Any, Callable, Optional, TypeVar import tenacity +from airflow.configuration import conf from airflow.models import TaskInstance @@ -26,3 +27,34 @@ def wrapper(self, *args, **kwargs): @log_exceptions def xcom_pull_for_ti(ti: TaskInstance, key: str) -> Any: return ti.xcom_pull(key=key, task_ids=ti.task_id, map_indexes=ti.map_index) + + +T = TypeVar("T") + + +def resolve_parameter_value( + param_name: str, + param_value: Optional[T], + kwargs: dict, + fallback_value: T, + type_converter: Callable[[str], T] = lambda x: x, +) -> T: + if param_value is not None: + return param_value + + dag = kwargs.get("dag") + if dag and getattr(dag, "default_args", None): + default_args = dag.default_args + if param_name in default_args: + return default_args[param_name] + + airflow_config_value = conf.get("my_section", param_name, fallback=None) + if airflow_config_value is not None: + try: + return type_converter(airflow_config_value) + except ValueError as e: + raise ValueError( + f"Failed to convert '{airflow_config_value}' for '{param_name}': {e}" + ) + + return fallback_value diff --git a/third_party/airflow/test/unit/operators/test_armada.py b/third_party/airflow/test/unit/operators/test_armada.py index c8b3272db66..3706276455c 100644 --- a/third_party/airflow/test/unit/operators/test_armada.py +++ b/third_party/airflow/test/unit/operators/test_armada.py @@ -25,6 +25,8 @@ def default_hook() -> MagicMock: mock = MagicMock() job_context = running_job_context() mock.submit_job.return_value = job_context + mock.job_by_external_job_uri.return_value = None + mock.job_termination_reason.return_value = "FAILED" mock.refresh_context.return_value = dataclasses.replace( job_context, job_state=JobState.SUCCEEDED.name, cluster=DEFAULT_CLUSTER ) @@ -51,7 +53,7 @@ def mock_operator_dependencies(): def context(): mock_ti = MagicMock() mock_ti.task_id = DEFAULT_TASK_ID - mock_ti.try_number = 0 + mock_ti.try_number = 1 mock_ti.xcom_pull.return_value = None mock_dag = MagicMock() diff --git a/third_party/airflow/test/unit/policies/test_reattach.py b/third_party/airflow/test/unit/policies/test_reattach.py new file mode 100644 index 00000000000..6789d91c957 --- /dev/null +++ b/third_party/airflow/test/unit/policies/test_reattach.py @@ -0,0 +1,78 @@ +from unittest.mock import Mock +import pytest + +from armada_client.typings import JobState + +from armada.policies.reattach import ( + policy, + never_reattach, + always_reattach, + running_or_succeeded_reattach, + external_job_uri, +) + + +def test_external_job_uri(): + mock_task_instance = Mock() + mock_task_instance.task_id = "example_task" + mock_task_instance.map_index = 42 + + mock_dag = Mock() + mock_dag.dag_id = "example_dag" + + mock_context = {"ti": 
mock_task_instance, "run_id": "test_run_123", "dag": mock_dag} + + expected_uri = "airflow://example_dag/example_task/test_run_123/42" + + assert external_job_uri(mock_context) == expected_uri + + +@pytest.mark.parametrize( + "state", + list(JobState), +) +def test_never_reattach(state): + assert not never_reattach(state, termination_reason="any reason") + + +@pytest.mark.parametrize( + "state", + list(JobState), +) +def test_always_reattach(state): + assert always_reattach(state, termination_reason="any reason") + + +@pytest.mark.parametrize( + "state, expected", + [ + (JobState.RUNNING, True), + (JobState.SUCCEEDED, True), + (JobState.CANCELLED, True), + (JobState.FAILED, False), + (JobState.REJECTED, False), + ], +) +def test_running_or_succeeded_reattach(state, expected): + assert ( + running_or_succeeded_reattach(state, termination_reason="any reason") + == expected + ) + + +@pytest.mark.parametrize( + "policy_type, expected_function", + [ + ("always", always_reattach), + ("never", never_reattach), + ("running_or_succeeded", running_or_succeeded_reattach), + ("ALWAYS", always_reattach), + ], +) +def test_policy_selector(policy_type, expected_function): + assert policy(policy_type) is expected_function + + +def test_policy_selector_invalid(): + with pytest.raises(ValueError, match="Unknown policy type: invalid"): + policy("invalid") From c7a2a4e2a3cb48c79aadab8a59c7fac0cf1f721b Mon Sep 17 00:00:00 2001 From: Martynas Asipauskas Date: Tue, 26 Nov 2024 19:34:44 +0000 Subject: [PATCH 06/12] Re-attach logic - final fixes (#4064) --- docs/python_airflow_operator.md | 7 +++- .../airflow/armada/operators/armada.py | 36 ++++++++++++------- third_party/airflow/pyproject.toml | 4 +-- 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/docs/python_airflow_operator.md b/docs/python_airflow_operator.md index 486d47cde1c..8037c0e8ace 100644 --- a/docs/python_airflow_operator.md +++ b/docs/python_airflow_operator.md @@ -12,7 +12,7 @@ This class provides integration with Airflow and Armada ## armada.operators.armada module -### _class_ armada.operators.armada.ArmadaOperator(name, channel_args, armada_queue, job_request, job_set_prefix='', lookout_url_template=None, poll_interval=30, container_logs=None, k8s_token_retriever=None, deferrable=False, job_acknowledgement_timeout=300, dry_run=False, \*\*kwargs) +### _class_ armada.operators.armada.ArmadaOperator(name, channel_args, armada_queue, job_request, job_set_prefix='', lookout_url_template=None, poll_interval=30, container_logs=None, k8s_token_retriever=None, deferrable=False, job_acknowledgement_timeout=300, dry_run=False, reattach_policy=None, \*\*kwargs) Bases: `BaseOperator`, `LoggingMixin` An Airflow operator that manages Job submission to Armada. @@ -60,6 +60,9 @@ and handles job cancellation if the Airflow task is killed. * **dry_run** (*bool*) – + * **reattach_policy** (*Optional**[**str**] **| **Callable**[**[**JobState**, **str**]**, **bool**]*) – + + #### execute(context) Submits the job to Armada and polls for completion. @@ -167,6 +170,8 @@ acknowledged by Armada. :type job_acknowledgement_timeout: int :param dry_run: Run Operator in dry-run mode - render Armada request and terminate. :type dry_run: bool +:param reattach_policy: Operator reattach policy to use (defaults to: never) +:type reattach_policy: Optional[str] | Callable[[JobState, str], bool] :param kwargs: Additional keyword arguments to pass to the BaseOperator. 
diff --git a/third_party/airflow/armada/operators/armada.py b/third_party/airflow/armada/operators/armada.py
index 158a80cc479..0c683928fb1 100644
--- a/third_party/airflow/armada/operators/armada.py
+++ b/third_party/airflow/armada/operators/armada.py
@@ -105,8 +105,8 @@ class ArmadaOperator(BaseOperator, LoggingMixin):
 :type job_acknowledgement_timeout: int
 :param dry_run: Run Operator in dry-run mode - render Armada request and terminate.
 :type dry_run: bool
-:param reattach_policy: Operator reattach policy to use (defaults to: always)
-:type reattach_policy: Optional[str]
+:param reattach_policy: Operator reattach policy to use (defaults to: never)
+:type reattach_policy: Optional[str] | Callable[[JobState, str], bool]
 :param kwargs: Additional keyword arguments to pass to the BaseOperator.
 """
 
@@ -135,7 +135,7 @@ def __init__(
         dry_run: bool = conf.getboolean(
             "armada_operator", "default_dry_run", fallback=False
         ),
-        reattach_policy: Optional[str] = None,
+        reattach_policy: Optional[str] | Callable[[JobState, str], bool] = None,
         **kwargs,
     ) -> None:
         super().__init__(**kwargs)
@@ -154,14 +154,21 @@ def __init__(
         self.dry_run = dry_run
         self.job_context = None
 
-        configured_reattach_policy: str = resolve_parameter_value(
-            "reattach_policy", reattach_policy, kwargs, "never"
-        )
-        self.log.info(
-            f"Configured reattach policy to: '{configured_reattach_policy}',"
-            f" max retries: {self.retries}"
-        )
-        self.reattach_policy = policy(configured_reattach_policy)
+        if callable(reattach_policy):
+            self.log.info(
+                f"Configured reattach policy with callable,"
+                f" max retries: {self.retries}"
+            )
+            self.reattach_policy = reattach_policy
+        else:
+            configured_reattach_policy: str = resolve_parameter_value(
+                "reattach_policy", reattach_policy, kwargs, "never"
+            )
+            self.log.info(
+                f"Configured reattach policy to: '{configured_reattach_policy}',"
+                f" max retries: {self.retries}"
+            )
+            self.reattach_policy = policy(configured_reattach_policy)
 
         if self.container_logs and self.k8s_token_retriever is None:
             self.log.warning(
@@ -342,8 +349,11 @@ def _try_reattach_to_running_job(
         self, context: Context
     ) -> Optional[RunningJobContext]:
         # On first try we intentionally do not re-attach.
- self.log.info(context) - if context["ti"].try_number == 1: + new_run = ( + context["ti"].max_tries - context["ti"].try_number + 1 + == context["ti"].task.retries + ) + if new_run: return None expected_job_uri = external_job_uri(context) diff --git a/third_party/airflow/pyproject.toml b/third_party/airflow/pyproject.toml index 67c7b91f678..3e278fade54 100644 --- a/third_party/airflow/pyproject.toml +++ b/third_party/airflow/pyproject.toml @@ -4,13 +4,13 @@ build-backend = "setuptools.build_meta" [project] name = "armada_airflow" -version = "1.0.10" +version = "1.0.11" description = "Armada Airflow Operator" readme='README.md' authors = [{name = "Armada-GROSS", email = "armada@armadaproject.io"}] license = { text = "Apache Software License" } dependencies=[ - 'armada-client>=0.4.7', + 'armada-client>=0.4.8', 'apache-airflow>=2.6.3', 'types-protobuf==4.24.0.1', 'kubernetes>=23.6.0', From 2c4a203cbcffe5d4ae50d6838e197769923dc957 Mon Sep 17 00:00:00 2001 From: Martynas Asipauskas Date: Wed, 27 Nov 2024 15:11:02 +0000 Subject: [PATCH 07/12] Fix operator config prefix (#4066) --- third_party/airflow/armada/utils.py | 2 +- third_party/airflow/pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/third_party/airflow/armada/utils.py b/third_party/airflow/armada/utils.py index 198bc5e8350..7a881f71211 100644 --- a/third_party/airflow/armada/utils.py +++ b/third_party/airflow/armada/utils.py @@ -48,7 +48,7 @@ def resolve_parameter_value( if param_name in default_args: return default_args[param_name] - airflow_config_value = conf.get("my_section", param_name, fallback=None) + airflow_config_value = conf.get("armada_operator", param_name, fallback=None) if airflow_config_value is not None: try: return type_converter(airflow_config_value) diff --git a/third_party/airflow/pyproject.toml b/third_party/airflow/pyproject.toml index 3e278fade54..fd162626326 100644 --- a/third_party/airflow/pyproject.toml +++ b/third_party/airflow/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "armada_airflow" -version = "1.0.11" +version = "1.0.12" description = "Armada Airflow Operator" readme='README.md' authors = [{name = "Armada-GROSS", email = "armada@armadaproject.io"}] From 12634a5da4dfbc0d966e617400c444e366ae3a44 Mon Sep 17 00:00:00 2001 From: robertdavidsmith <34475852+robertdavidsmith@users.noreply.github.com> Date: Thu, 28 Nov 2024 17:00:07 +0000 Subject: [PATCH 08/12] Scheduler: per-pool protectedFractionOfFairShare (#4068) Signed-off-by: Robert Smith --- .../scheduler/configuration/configuration.go | 14 ++++++-- .../configuration/configuration_test.go | 33 +++++++++++++++++++ .../scheduler/internaltypes/resource_list.go | 14 ++++---- .../internaltypes/resource_list_test.go | 12 +++++++ .../scheduler/scheduling/scheduling_algo.go | 16 +++++---- internal/scheduler/simulator/simulator.go | 2 +- 6 files changed, 75 insertions(+), 16 deletions(-) create mode 100644 internal/scheduler/configuration/configuration_test.go diff --git a/internal/scheduler/configuration/configuration.go b/internal/scheduler/configuration/configuration.go index e6aa7cacde8..18415f49531 100644 --- a/internal/scheduler/configuration/configuration.go +++ b/internal/scheduler/configuration/configuration.go @@ -271,6 +271,16 @@ type WellKnownNodeType struct { } type PoolConfig struct { - Name string `validate:"required"` - AwayPools []string + Name string `validate:"required"` + AwayPools []string + ProtectedFractionOfFairShare *float64 +} + +func (sc *SchedulingConfig) 
GetProtectedFractionOfFairShare(poolName string) float64 { + for _, poolConfig := range sc.Pools { + if poolConfig.Name == poolName && poolConfig.ProtectedFractionOfFairShare != nil { + return *poolConfig.ProtectedFractionOfFairShare + } + } + return sc.ProtectedFractionOfFairShare } diff --git a/internal/scheduler/configuration/configuration_test.go b/internal/scheduler/configuration/configuration_test.go new file mode 100644 index 00000000000..f01ed685a9b --- /dev/null +++ b/internal/scheduler/configuration/configuration_test.go @@ -0,0 +1,33 @@ +package configuration + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetProtectedFractionOfFairShare(t *testing.T) { + zero := 0.0 + half := 0.5 + sc := SchedulingConfig{ + ProtectedFractionOfFairShare: 0.1, + Pools: []PoolConfig{ + { + Name: "overrides-pool", + ProtectedFractionOfFairShare: &half, + }, + { + Name: "overrides-zero-pool", + ProtectedFractionOfFairShare: &zero, + }, + { + Name: "not-set-pool", + }, + }, + } + + assert.Equal(t, 0.5, sc.GetProtectedFractionOfFairShare("overrides-pool")) + assert.Equal(t, 0.0, sc.GetProtectedFractionOfFairShare("overrides-zero-pool")) + assert.Equal(t, 0.1, sc.GetProtectedFractionOfFairShare("not-set-pool")) + assert.Equal(t, 0.1, sc.GetProtectedFractionOfFairShare("missing-pool")) +} diff --git a/internal/scheduler/internaltypes/resource_list.go b/internal/scheduler/internaltypes/resource_list.go index 63772bbd8ea..ea1fed9f059 100644 --- a/internal/scheduler/internaltypes/resource_list.go +++ b/internal/scheduler/internaltypes/resource_list.go @@ -3,6 +3,7 @@ package internaltypes import ( "fmt" "math" + "strings" "golang.org/x/exp/slices" k8sResource "k8s.io/apimachinery/pkg/api/resource" @@ -44,16 +45,17 @@ func (rl ResourceList) Equal(other ResourceList) bool { func (rl ResourceList) String() string { if rl.IsEmpty() { - return "empty" + return "(empty)" } - result := "" + + parts := []string{} for i, name := range rl.factory.indexToName { - if i > 0 { - result += " " + if rl.resources[i] == 0 { + continue } - result += fmt.Sprintf("%s=%s", name, rl.asQuantity(i).String()) + parts = append(parts, fmt.Sprintf("%s=%s", name, rl.asQuantity(i).String())) } - return result + return "(" + strings.Join(parts, ",") + ")" } func (rl ResourceList) GetByName(name string) (int64, error) { diff --git a/internal/scheduler/internaltypes/resource_list_test.go b/internal/scheduler/internaltypes/resource_list_test.go index 39fa9c6c1e2..f864dd55f8e 100644 --- a/internal/scheduler/internaltypes/resource_list_test.go +++ b/internal/scheduler/internaltypes/resource_list_test.go @@ -395,6 +395,18 @@ func TestNegate_HandlesEmptyCorrectly(t *testing.T) { assert.Equal(t, ResourceList{}, ResourceList{}.Negate()) } +func TestString(t *testing.T) { + factory := testFactory() + + assert.Equal(t, "(memory=102400,cpu=100)", testResourceList(factory, "100", "100Ki").String()) + assert.Equal(t, "(memory=102400)", testResourceList(factory, "0", "100Ki").String()) + assert.Equal(t, "()", testResourceList(factory, "0", "0").String()) +} + +func TestString_HandlesEmptyCorrectly(t *testing.T) { + assert.Equal(t, "(empty)", ResourceList{}.String()) +} + func testResourceList(factory *ResourceListFactory, cpu string, memory string) ResourceList { return factory.FromJobResourceListIgnoreUnknown(map[string]k8sResource.Quantity{ "cpu": k8sResource.MustParse(cpu), diff --git a/internal/scheduler/scheduling/scheduling_algo.go b/internal/scheduler/scheduling/scheduling_algo.go index 0dad9c8061f..be358dee02c 
100644
--- a/internal/scheduler/scheduling/scheduling_algo.go
+++ b/internal/scheduler/scheduling/scheduling_algo.go
@@ -126,12 +126,6 @@ func (l *FairSchedulingAlgo) Schedule(
 			continue
 		}
 
-		ctx.Infof("Scheduling on pool %s with capacity %s %s",
-			pool,
-			fsctx.nodeDb.TotalKubernetesResources().String(),
-			l.floatingResourceTypes.GetTotalAvailableForPool(pool.Name).String(),
-		)
-
 		start := time.Now()
 
 		schedulerResult, sctx, err := l.SchedulePool(ctx, fsctx, pool.Name)
@@ -522,11 +516,12 @@ func (l *FairSchedulingAlgo) SchedulePool(
 
 	constraints := schedulerconstraints.NewSchedulingConstraints(pool, totalResources, l.schedulingConfig, maps.Values(fsctx.queues))
 
+	protectedFractionOfFairShare := l.schedulingConfig.GetProtectedFractionOfFairShare(pool)
 	scheduler := NewPreemptingQueueScheduler(
 		fsctx.schedulingContext,
 		constraints,
 		l.floatingResourceTypes,
-		l.schedulingConfig.ProtectedFractionOfFairShare,
+		protectedFractionOfFairShare,
 		l.schedulingConfig.MaxQueueLookback,
 		fsctx.Txn,
 		fsctx.nodeDb,
@@ -534,6 +529,13 @@ func (l *FairSchedulingAlgo) SchedulePool(
 		fsctx.jobIdsByGangId,
 		fsctx.gangIdByJobId,
 	)
+
+	ctx.Infof("Scheduling on pool %s with capacity %s protectedFractionOfFairShare %f",
+		pool,
+		fsctx.nodeDb.TotalKubernetesResources().Add(l.floatingResourceTypes.GetTotalAvailableForPool(pool)).String(),
+		protectedFractionOfFairShare,
+	)
+
 	result, err := scheduler.Schedule(ctx)
 	if err != nil {
 		return nil, nil, err
diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go
index c6b74f21458..9ae751d4d1f 100644
--- a/internal/scheduler/simulator/simulator.go
+++ b/internal/scheduler/simulator/simulator.go
@@ -596,7 +596,7 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error {
 		sctx,
 		constraints,
 		s.floatingResourceTypes,
-		s.schedulingConfig.ProtectedFractionOfFairShare,
+		s.schedulingConfig.GetProtectedFractionOfFairShare(pool),
 		s.schedulingConfig.MaxQueueLookback,
 		txn,
 		nodeDb,

From 500e08a53e4d0d1a031a3f38684a876f265c77df Mon Sep 17 00:00:00 2001
From: JamesMurkin
Date: Fri, 29 Nov 2024 15:36:07 +0000
Subject: [PATCH 09/12] Update CandidateGangIterator ordering (#292) (#4069)

* add basic defrag

* ARMADA-2970 Small simulator improvements

- Log input files consistently
- Fix log output to only occur every 5 seconds
  - Was bugged to log every round, but only after 5 seconds
- Remove variance on gang jobs, so they finish at the same time

* Set 0 tailmean

* Update job ordering, to order largest job first

* Update so we schedule the larger gang first

* Remove unrelated changes

* Add test + fixes

* Fix comment

* Fix comment

* Add rollout flag + improved naming

* Pass arg in simulator

* Improve comment

---------

Co-authored-by: James Murkin
Co-authored-by: chrismar503
---
 config/scheduler/config.yaml                  |   1 +
 .../scheduler/configuration/configuration.go  |   4 +
 .../scheduling/preempting_queue_scheduler.go  |  20 +--
 .../preempting_queue_scheduler_test.go        |   3 +
 .../scheduler/scheduling/queue_scheduler.go   | 109 ++++++++++----
 .../scheduling/queue_scheduler_test.go        | 133 +++++++++++++++++-
 .../scheduler/scheduling/scheduling_algo.go   |   1 +
 .../scheduling/scheduling_algo_test.go        |   4 +-
 internal/scheduler/simulator/simulator.go     |   1 +
 .../scheduler/testfixtures/testfixtures.go    |   1 +
 10 files changed, 237 insertions(+), 40 deletions(-)

diff --git a/config/scheduler/config.yaml b/config/scheduler/config.yaml
index e76fb44d8d5..ffc9f65c2f3 100644
--- a/config/scheduler/config.yaml
+++ b/config/scheduler/config.yaml
@@ -73,6 +73,7 @@ scheduling:
resolution: "1" disableScheduling: false enableAssertions: false + enablePreferLargeJobOrdering: false protectedFractionOfFairShare: 1.0 nodeIdLabel: "kubernetes.io/hostname" priorityClasses: diff --git a/internal/scheduler/configuration/configuration.go b/internal/scheduler/configuration/configuration.go index 18415f49531..9c0ea3e8bba 100644 --- a/internal/scheduler/configuration/configuration.go +++ b/internal/scheduler/configuration/configuration.go @@ -131,6 +131,10 @@ type SchedulingConfig struct { DisableScheduling bool // Set to true to enable scheduler assertions. This results in some performance loss. EnableAssertions bool + // Experimental + // Set to true to enable larger job preferential ordering in the candidate gang iterator. + // This will result in larger jobs being ordered earlier in the job scheduling order + EnablePreferLargeJobOrdering bool // Only queues allocated more than this fraction of their fair share are considered for preemption. ProtectedFractionOfFairShare float64 `validate:"gte=0"` // Armada adds a node selector term to every scheduled pod using this label with the node name as value. diff --git a/internal/scheduler/scheduling/preempting_queue_scheduler.go b/internal/scheduler/scheduling/preempting_queue_scheduler.go index dec7c2422cd..c3056ffd384 100644 --- a/internal/scheduler/scheduling/preempting_queue_scheduler.go +++ b/internal/scheduler/scheduling/preempting_queue_scheduler.go @@ -30,6 +30,7 @@ type PreemptingQueueScheduler struct { floatingResourceTypes *floatingresources.FloatingResourceTypes protectedFractionOfFairShare float64 maxQueueLookBack uint + preferLargeJobOrdering bool jobRepo JobRepository nodeDb *nodedb.NodeDb // Maps job ids to the id of the node the job is associated with. @@ -46,6 +47,7 @@ func NewPreemptingQueueScheduler( sctx *schedulercontext.SchedulingContext, constraints schedulerconstraints.SchedulingConstraints, floatingResourceTypes *floatingresources.FloatingResourceTypes, + preferLargeJobOrdering bool, protectedFractionOfFairShare float64, maxQueueLookBack uint, jobRepo JobRepository, @@ -72,6 +74,7 @@ func NewPreemptingQueueScheduler( constraints: constraints, floatingResourceTypes: floatingResourceTypes, protectedFractionOfFairShare: protectedFractionOfFairShare, + preferLargeJobOrdering: preferLargeJobOrdering, maxQueueLookBack: maxQueueLookBack, jobRepo: jobRepo, nodeDb: nodeDb, @@ -305,7 +308,7 @@ func (sch *PreemptingQueueScheduler) evict(ctx *armadacontext.Context, evictor * if err := sch.nodeDb.Reset(); err != nil { return nil, nil, err } - if err := addEvictedJobsToNodeDb(ctx, sch.schedulingContext, sch.nodeDb, inMemoryJobRepo); err != nil { + if err := sch.addEvictedJobsToNodeDb(ctx, inMemoryJobRepo); err != nil { return nil, nil, err } return result, inMemoryJobRepo, nil @@ -477,22 +480,22 @@ func (q MinimalQueue) GetWeight() float64 { // addEvictedJobsToNodeDb adds evicted jobs to the NodeDb. // Needed to enable the nodeDb accounting for these when preempting. 
-func addEvictedJobsToNodeDb(_ *armadacontext.Context, sctx *schedulercontext.SchedulingContext, nodeDb *nodedb.NodeDb, inMemoryJobRepo *InMemoryJobRepository) error { +func (sch *PreemptingQueueScheduler) addEvictedJobsToNodeDb(_ *armadacontext.Context, inMemoryJobRepo *InMemoryJobRepository) error { gangItByQueue := make(map[string]*QueuedGangIterator) - for _, qctx := range sctx.QueueSchedulingContexts { + for _, qctx := range sch.schedulingContext.QueueSchedulingContexts { gangItByQueue[qctx.Queue] = NewQueuedGangIterator( - sctx, + sch.schedulingContext, inMemoryJobRepo.GetJobIterator(qctx.Queue), 0, false, ) } - qr := NewMinimalQueueRepositoryFromSchedulingContext(sctx) - candidateGangIterator, err := NewCandidateGangIterator(sctx.Pool, qr, sctx.FairnessCostProvider, gangItByQueue, false) + qr := NewMinimalQueueRepositoryFromSchedulingContext(sch.schedulingContext) + candidateGangIterator, err := NewCandidateGangIterator(sch.schedulingContext.Pool, qr, sch.schedulingContext.FairnessCostProvider, gangItByQueue, false, sch.preferLargeJobOrdering) if err != nil { return err } - txn := nodeDb.Txn(true) + txn := sch.nodeDb.Txn(true) defer txn.Abort() i := 0 for { @@ -502,7 +505,7 @@ func addEvictedJobsToNodeDb(_ *armadacontext.Context, sctx *schedulercontext.Sch break } else { for _, jctx := range gctx.JobSchedulingContexts { - if err := nodeDb.AddEvictedJobSchedulingContextWithTxn(txn, i, jctx); err != nil { + if err := sch.nodeDb.AddEvictedJobSchedulingContextWithTxn(txn, i, jctx); err != nil { return err } i++ @@ -547,6 +550,7 @@ func (sch *PreemptingQueueScheduler) schedule( jobIteratorByQueue, skipUnsuccessfulSchedulingKeyCheck, considerPriorityCLassPriority, + sch.preferLargeJobOrdering, sch.maxQueueLookBack, ) if err != nil { diff --git a/internal/scheduler/scheduling/preempting_queue_scheduler_test.go b/internal/scheduler/scheduling/preempting_queue_scheduler_test.go index 8aae7c57230..86e1a688951 100644 --- a/internal/scheduler/scheduling/preempting_queue_scheduler_test.go +++ b/internal/scheduler/scheduling/preempting_queue_scheduler_test.go @@ -2063,6 +2063,7 @@ func TestPreemptingQueueScheduler(t *testing.T) { sctx, constraints, testfixtures.TestEmptyFloatingResources, + true, tc.SchedulingConfig.ProtectedFractionOfFairShare, tc.SchedulingConfig.MaxQueueLookback, jobDbTxn, @@ -2415,6 +2416,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { sctx, constraints, testfixtures.TestEmptyFloatingResources, + true, tc.SchedulingConfig.ProtectedFractionOfFairShare, tc.SchedulingConfig.MaxQueueLookback, jobDbTxn, @@ -2477,6 +2479,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { sctx, constraints, testfixtures.TestEmptyFloatingResources, + true, tc.SchedulingConfig.ProtectedFractionOfFairShare, tc.SchedulingConfig.MaxQueueLookback, jobDbTxn, diff --git a/internal/scheduler/scheduling/queue_scheduler.go b/internal/scheduler/scheduling/queue_scheduler.go index 96969993ede..ebdbf6071be 100644 --- a/internal/scheduler/scheduling/queue_scheduler.go +++ b/internal/scheduler/scheduling/queue_scheduler.go @@ -36,6 +36,7 @@ func NewQueueScheduler( jobIteratorByQueue map[string]JobContextIterator, skipUnsuccessfulSchedulingKeyCheck bool, considerPriorityClassPriority bool, + prioritiseLargerJobs bool, maxQueueLookBack uint, ) (*QueueScheduler, error) { for queue := range jobIteratorByQueue { @@ -51,7 +52,7 @@ func NewQueueScheduler( for queue, it := range jobIteratorByQueue { gangIteratorsByQueue[queue] = NewQueuedGangIterator(sctx, it, maxQueueLookBack, true) } - 
candidateGangIterator, err := NewCandidateGangIterator(sctx.Pool, sctx, sctx.FairnessCostProvider, gangIteratorsByQueue, considerPriorityClassPriority)
+	candidateGangIterator, err := NewCandidateGangIterator(sctx.Pool, sctx, sctx.FairnessCostProvider, gangIteratorsByQueue, considerPriorityClassPriority, prioritiseLargerJobs)
 	if err != nil {
 		return nil, err
 	}
@@ -343,6 +344,7 @@ func NewCandidateGangIterator(
 	fairnessCostProvider fairness.FairnessCostProvider,
 	iteratorsByQueue map[string]*QueuedGangIterator,
 	considerPriority bool,
+	prioritiseLargerJobs bool,
 ) (*CandidateGangIterator, error) {
 	it := &CandidateGangIterator{
 		pool:                    pool,
@@ -350,12 +352,14 @@ func NewCandidateGangIterator(
 		fairnessCostProvider:    fairnessCostProvider,
 		onlyYieldEvictedByQueue: make(map[string]bool),
 		pq: QueueCandidateGangIteratorPQ{
-			considerPriority: considerPriority,
-			items:            make([]*QueueCandidateGangIteratorItem, 0, len(iteratorsByQueue)),
+			considerPriority:     considerPriority,
+			prioritiseLargerJobs: prioritiseLargerJobs,
+			items:                make([]*QueueCandidateGangIteratorItem, 0, len(iteratorsByQueue)),
 		},
 	}
 	for queue, queueIt := range iteratorsByQueue {
-		if _, err := it.updateAndPushPQItem(it.newPQItem(queue, queueIt)); err != nil {
+		queueContext := queueIt.schedulingContext.QueueSchedulingContexts[queue]
+		if _, err := it.updateAndPushPQItem(it.newPQItem(queue, queueContext.AdjustedFairShare, queueIt)); err != nil {
 			return nil, err
 		}
 	}
@@ -406,13 +410,14 @@ func (it *CandidateGangIterator) Peek() (*schedulercontext.GangSchedulingContext
 		return nil, 0.0, nil
 	}
 	first := it.pq.items[0]
-	return first.gctx, first.queueCost, nil
+	return first.gctx, first.proposedQueueCost, nil
 }
 
-func (it *CandidateGangIterator) newPQItem(queue string, queueIt *QueuedGangIterator) *QueueCandidateGangIteratorItem {
+func (it *CandidateGangIterator) newPQItem(queue string, queueFairShare float64, queueIt *QueuedGangIterator) *QueueCandidateGangIteratorItem {
 	return &QueueCandidateGangIteratorItem{
-		queue: queue,
-		it:    queueIt,
+		queue:     queue,
+		fairShare: queueFairShare,
+		it:        queueIt,
 	}
 }
 
@@ -435,7 +440,9 @@ func (it *CandidateGangIterator) updateAndPushPQItem(item *QueueCandidateGangIte
 
 func (it *CandidateGangIterator) updatePQItem(item *QueueCandidateGangIteratorItem) error {
 	item.gctx = nil
-	item.queueCost = 0
+	item.proposedQueueCost = 0
+	item.currentQueueCost = 0
+	item.itemSize = 0
 	gctx, err := item.it.Peek()
 	if err != nil {
 		return err
@@ -444,11 +451,15 @@ func (it *CandidateGangIterator) updatePQItem(item *QueueCandidateGangIteratorIt
 		return nil
 	}
 	item.gctx = gctx
-	cost, err := it.queueCostWithGctx(gctx)
+	queue, err := it.getQueue(gctx)
 	if err != nil {
 		return err
 	}
-	item.queueCost = cost
+	item.proposedQueueCost = it.fairnessCostProvider.WeightedCostFromAllocation(queue.GetAllocation().Add(gctx.TotalResourceRequests), queue.GetWeight())
+	item.currentQueueCost = it.fairnessCostProvider.WeightedCostFromAllocation(queue.GetAllocation(), queue.GetWeight())
+	// We multiply here, as queue weights are a fraction.
+	// So for the same job size, jobs from highly weighted queues will look larger.
+	item.itemSize = it.fairnessCostProvider.UnweightedCostFromAllocation(gctx.TotalResourceRequests) * queue.GetWeight()
 
 	// The PQItem needs to have a priority class priority for the whole gang. This may not be uniform as different
 	// Gang members may have been scheduled at different priorities due to home/away preemption. We therefore take the
@@ -473,24 +484,24 @@ func (it *CandidateGangIterator) updatePQItem(item *QueueCandidateGangIteratorIt
 	return nil
 }
 
-// queueCostWithGctx returns the cost associated with a queue if gctx were to be scheduled.
-func (it *CandidateGangIterator) queueCostWithGctx(gctx *schedulercontext.GangSchedulingContext) (float64, error) {
+// getQueue returns the queue of the supplied gctx.
+func (it *CandidateGangIterator) getQueue(gctx *schedulercontext.GangSchedulingContext) (fairness.Queue, error) {
 	gangQueue := gctx.Queue
 	if len(gctx.JobSchedulingContexts) > 0 && !gctx.JobSchedulingContexts[0].IsHomeJob(it.pool) {
 		gangQueue = schedulercontext.CalculateAwayQueueName(gctx.Queue)
 	}
 	queue, ok := it.queueRepository.GetQueue(gangQueue)
 	if !ok {
-		return 0, errors.Errorf("unknown queue %s", gangQueue)
+		return nil, errors.Errorf("unknown queue %s", gangQueue)
 	}
-
-	return it.fairnessCostProvider.WeightedCostFromAllocation(queue.GetAllocation().Add(gctx.TotalResourceRequests), queue.GetWeight()), nil
+	return queue, nil
 }
 
 // QueueCandidateGangIteratorPQ is a priority queue used by CandidateGangIterator to determine from which queue to schedule the next job.
 type QueueCandidateGangIteratorPQ struct {
-	considerPriority bool
-	items            []*QueueCandidateGangIteratorItem
+	considerPriority     bool
+	prioritiseLargerJobs bool
+	items                []*QueueCandidateGangIteratorItem
 }
 
 type QueueCandidateGangIteratorItem struct {
@@ -501,9 +512,16 @@ type QueueCandidateGangIteratorItem struct {
 	// Most recent value produced by the iterator.
 	// Cached here to avoid repeating scheduling checks unnecessarily.
 	gctx *schedulercontext.GangSchedulingContext
-	// Cost associated with the queue if the topmost gang in the queue were to be scheduled.
-	// Used to order queues fairly.
-	queueCost float64
+	// Cost associated with the queue if the topmost gang in the queue were to be scheduled.
+	proposedQueueCost float64
+	// Current cost associated with the queue.
+	currentQueueCost float64
+	// The fair share of the queue; compared with proposedQueueCost to determine
+	// whether scheduling the next item will put the queue over its fair share.
+	fairShare float64
+	// The size of the topmost gang.
+	// Used to determine which job is larger.
+	itemSize float64
 	priorityClassPriority int32
 	// The index of the item in the heap.
 	// maintained by the heap.Interface methods.
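To make the new per-item fields concrete before the Less implementation in the next
hunk, here is a self-contained Go sketch, separate from the patch itself, of how two
queues compare when prioritiseLargerJobs is set. The simplified item type and the
numbers in main are invented for illustration; the sketch mirrors the three regimes
(both queues within fair share, both over, and mixed):

    package main

    import "fmt"

    type item struct {
        queue             string
        proposedQueueCost float64 // queue cost if its top gang were scheduled
        currentQueueCost  float64 // queue cost right now
        fairShare         float64 // adjusted fair share of the queue
        itemSize          float64 // weighted size of the top gang
    }

    // less mirrors the prioritiseLargerJobs branch of QueueCandidateGangIteratorPQ.Less.
    func less(a, b item) bool {
        switch {
        case a.proposedQueueCost <= a.fairShare && b.proposedQueueCost <= b.fairShare:
            // Both stay within fair share: on equal current cost prefer the
            // larger gang, otherwise the queue with the lower current cost.
            if a.currentQueueCost == b.currentQueueCost && a.itemSize != b.itemSize {
                return a.itemSize > b.itemSize
            }
            if a.currentQueueCost != b.currentQueueCost {
                return a.currentQueueCost < b.currentQueueCost
            }
        case a.proposedQueueCost > a.fairShare && b.proposedQueueCost > b.fairShare:
            // Both would exceed fair share: prefer the smaller overshoot.
            if a.proposedQueueCost != b.proposedQueueCost {
                return a.proposedQueueCost < b.proposedQueueCost
            }
        case a.proposedQueueCost <= a.fairShare:
            return true // only a stays within its fair share
        default:
            return false // only b stays within its fair share
        }
        return a.queue < b.queue // tie-break by queue name
    }

    func main() {
        a := item{queue: "A", proposedQueueCost: 0.3, currentQueueCost: 0.1, fairShare: 0.5, itemSize: 0.2}
        b := item{queue: "B", proposedQueueCost: 0.2, currentQueueCost: 0.1, fairShare: 0.5, itemSize: 0.1}
        fmt.Println(less(a, b)) // true: equal current cost, so the larger gang (queue A) goes first
    }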
@@ -513,18 +531,51 @@ type QueueCandidateGangIteratorItem struct {
 func (pq *QueueCandidateGangIteratorPQ) Len() int { return len(pq.items) }
 
 func (pq *QueueCandidateGangIteratorPQ) Less(i, j int) bool {
-	// Consider priority class priority first
-	if pq.considerPriority && pq.items[i].priorityClassPriority != pq.items[j].priorityClassPriority {
-		return pq.items[i].priorityClassPriority > pq.items[j].priorityClassPriority
-	}
+	item1 := pq.items[i]
+	item2 := pq.items[j]
 
-	// Then queue cost
-	if pq.items[i].queueCost != pq.items[j].queueCost {
-		return pq.items[i].queueCost < pq.items[j].queueCost
+	// Consider priority class priority first
+	if pq.considerPriority && item1.priorityClassPriority != item2.priorityClassPriority {
+		return item1.priorityClassPriority > item2.priorityClassPriority
+	}
+
+	if pq.prioritiseLargerJobs {
+		if item1.proposedQueueCost <= item1.fairShare && item2.proposedQueueCost <= item2.fairShare {
+			// If adding the items results in neither queue exceeding its fairshare
+			// take the largest job if the queues have equal current cost (which is the case if all jobs get evicted / on an empty farm)
+			// The reason we prefer larger jobs is:
+			// - It reduces fragmentation - a typical strategy is to schedule larger jobs first as smaller jobs can fit in around larger jobs
+			// - It makes it easier for larger jobs to get on and helps to reduce the bias towards smaller jobs.
+			//   Particularly helpful if users have a single large gang they want to get on, as they'll get considered first
+			if item1.currentQueueCost == item2.currentQueueCost && item1.itemSize != item2.itemSize {
+				return item1.itemSize > item2.itemSize
+			}
+			// Otherwise let whichever queue has the lowest current cost go first, regardless of job size
+			// This is so that:
+			// - We interleave smaller jobs and don't just schedule a queue of large jobs first until it hits its fairshare
+			// - We don't block queues with larger jobs from getting on as they make a bigger step than queues with smaller jobs
+			if item1.currentQueueCost != item2.currentQueueCost {
+				return item1.currentQueueCost < item2.currentQueueCost
+			}
+		} else if item1.proposedQueueCost > item1.fairShare && item2.proposedQueueCost > item2.fairShare {
+			// If adding the items results in both queues being above their fairshare
+			// take the item that results in the smallest amount over the fairshare
+			if item1.proposedQueueCost != item2.proposedQueueCost {
+				return item1.proposedQueueCost < item2.proposedQueueCost
+			}
+		} else if item1.proposedQueueCost <= item1.fairShare {
+			return true
+		} else if item2.proposedQueueCost <= item2.fairShare {
+			return false
+		}
+	} else {
+		if item1.proposedQueueCost != item2.proposedQueueCost {
+			return item1.proposedQueueCost < item2.proposedQueueCost
+		}
 	}
 
 	// Tie-break by queue name.
-	return pq.items[i].queue < pq.items[j].queue
+	return item1.queue < item2.queue
 }
 
 func (pq *QueueCandidateGangIteratorPQ) Swap(i, j int) {
diff --git a/internal/scheduler/scheduling/queue_scheduler_test.go b/internal/scheduler/scheduling/queue_scheduler_test.go
index f1ef18c9faa..2f022931b08 100644
--- a/internal/scheduler/scheduling/queue_scheduler_test.go
+++ b/internal/scheduler/scheduling/queue_scheduler_test.go
@@ -2,6 +2,7 @@ package scheduling
 
 import (
 	"fmt"
+	"sort"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -545,7 +546,7 @@ func TestQueueScheduler(t *testing.T) {
 			it := jobRepo.GetJobIterator(q.Name)
 			jobIteratorByQueue[q.Name] = it
 		}
-		sch, err := NewQueueScheduler(sctx, constraints, testfixtures.TestEmptyFloatingResources, nodeDb, jobIteratorByQueue, false, false, tc.SchedulingConfig.MaxQueueLookback)
+		sch, err := NewQueueScheduler(sctx, constraints, testfixtures.TestEmptyFloatingResources, nodeDb, jobIteratorByQueue, false, false, true, tc.SchedulingConfig.MaxQueueLookback)
 		require.NoError(t, err)
 
 		result, err := sch.Schedule(armadacontext.Background())
@@ -693,3 +694,133 @@ func NewNodeDb(config configuration.SchedulingConfig, stringInterner *stringinte
 	}
 	return nodeDb, nil
 }
+
+func TestQueueCandidateGangIteratorPQ_Ordering_BelowFairShare_EvenCurrentCost(t *testing.T) {
+	queueA := &QueueCandidateGangIteratorItem{
+		queue:             "A",
+		proposedQueueCost: 2,
+		currentQueueCost:  0,
+		fairShare:         5,
+		itemSize:          2,
+	}
+	queueB := &QueueCandidateGangIteratorItem{
+		queue:             "B",
+		proposedQueueCost: 3,
+		currentQueueCost:  0,
+		fairShare:         5,
+		itemSize:          3,
+	}
+	queueC := &QueueCandidateGangIteratorItem{
+		queue:             "C",
+		proposedQueueCost: 1,
+		currentQueueCost:  0,
+		fairShare:         5,
+		itemSize:          1,
+	}
+	pq := &QueueCandidateGangIteratorPQ{prioritiseLargerJobs: true, items: []*QueueCandidateGangIteratorItem{queueA, queueB, queueC}}
+
+	sort.Sort(pq)
+	// Should be in order of biggest job as currentQueueCosts are all equal
+	expectedOrder := []*QueueCandidateGangIteratorItem{queueB, queueA, queueC}
+	assert.Equal(t, expectedOrder, pq.items)
+}
+
+func TestQueueCandidateGangIteratorPQ_Ordering_BelowFairShare_UnevenCurrentCost(t *testing.T) {
+	queueA := &QueueCandidateGangIteratorItem{
+		queue:             "A",
+		proposedQueueCost: 4,
+		currentQueueCost:  2,
+		fairShare:         5,
+		itemSize:          2,
+	}
+	queueB := &QueueCandidateGangIteratorItem{
+		queue:             "B",
+		proposedQueueCost: 3,
+		currentQueueCost:  2,
+		fairShare:         5,
+		itemSize:          1,
+	}
+	queueC := &QueueCandidateGangIteratorItem{
+		queue:             "C",
+		proposedQueueCost: 2,
+		currentQueueCost:  1,
+		fairShare:         5,
+		itemSize:          1,
+	}
+	pq := &QueueCandidateGangIteratorPQ{prioritiseLargerJobs: true, items: []*QueueCandidateGangIteratorItem{queueA, queueB, queueC}}
+
+	sort.Sort(pq)
+	// Should be in order of lowest current queue cost, then when current queue cost is equal it should be in order of largest job
+	expectedOrder := []*QueueCandidateGangIteratorItem{queueC, queueA, queueB}
+	assert.Equal(t, expectedOrder, pq.items)
+}
+
+func TestQueueCandidateGangIteratorPQ_Ordering_AboveFairShare(t *testing.T) {
+	queueA := &QueueCandidateGangIteratorItem{
+		queue:             "A",
+		proposedQueueCost: 8,
+		currentQueueCost:  6,
+		fairShare:         5,
+		itemSize:          2,
+	}
+	queueB := &QueueCandidateGangIteratorItem{
+		queue:             "B",
+		proposedQueueCost: 7,
+		currentQueueCost:  4,
+		fairShare:         5,
+		itemSize:          3,
+	}
+	queueC := &QueueCandidateGangIteratorItem{
+		queue:             "C",
+		proposedQueueCost: 9,
+		currentQueueCost:  8,
+		fairShare:         5,
+		itemSize:          1,
+	}
+	pq := &QueueCandidateGangIteratorPQ{prioritiseLargerJobs: true, items: []*QueueCandidateGangIteratorItem{queueA, queueB, queueC}}
+
+	sort.Sort(pq)
+	// Should be in order of smallest amount over their fairshare
+	expectedOrder := []*QueueCandidateGangIteratorItem{queueB, queueA, queueC}
+	assert.Equal(t, expectedOrder, pq.items)
+}
+
+func TestQueueCandidateGangIteratorPQ_Ordering_MixedFairShare(t *testing.T) {
+	aboveFairShare := &QueueCandidateGangIteratorItem{
+		queue:             "A",
+		proposedQueueCost: 8,
+		currentQueueCost:  6,
+		fairShare:         5,
+		itemSize:          2,
+	}
+	belowFairShare := &QueueCandidateGangIteratorItem{
+		queue:             "B",
+		proposedQueueCost: 3,
+		currentQueueCost:  2,
+		fairShare:         5,
+		itemSize:          1,
+	}
+	pq := &QueueCandidateGangIteratorPQ{prioritiseLargerJobs: true, items: []*QueueCandidateGangIteratorItem{aboveFairShare, belowFairShare}}
+
+	sort.Sort(pq)
+	expectedOrder := []*QueueCandidateGangIteratorItem{belowFairShare, aboveFairShare}
+	assert.Equal(t, expectedOrder, pq.items)
+}
+
+func TestQueueCandidateGangIteratorPQ_Fallback(t *testing.T) {
+	queueA := &QueueCandidateGangIteratorItem{
+		queue: "A",
+	}
+	queueB := &QueueCandidateGangIteratorItem{
+		queue: "B",
+	}
+	queueC := &QueueCandidateGangIteratorItem{
+		queue: "C",
+	}
+	pq := &QueueCandidateGangIteratorPQ{prioritiseLargerJobs: true, items: []*QueueCandidateGangIteratorItem{queueB, queueC, queueA}}
+
+	sort.Sort(pq)
+	// Will fall back to ordering by queue name in the case that all queues are the same sizes etc
+	expectedOrder := []*QueueCandidateGangIteratorItem{queueA, queueB, queueC}
+	assert.Equal(t, expectedOrder, pq.items)
+}
diff --git a/internal/scheduler/scheduling/scheduling_algo.go b/internal/scheduler/scheduling/scheduling_algo.go
index be358dee02c..6840a87782a 100644
--- a/internal/scheduler/scheduling/scheduling_algo.go
+++ b/internal/scheduler/scheduling/scheduling_algo.go
@@ -521,6 +521,7 @@ func (l *FairSchedulingAlgo) SchedulePool(
 		fsctx.schedulingContext,
 		constraints,
 		l.floatingResourceTypes,
+		l.schedulingConfig.EnablePreferLargeJobOrdering,
 		protectedFractionOfFairShare,
 		l.schedulingConfig.MaxQueueLookback,
 		fsctx.Txn,
diff --git a/internal/scheduler/scheduling/scheduling_algo_test.go b/internal/scheduler/scheduling/scheduling_algo_test.go
index fcfc83cee25..18e69ffd7f2 100644
--- a/internal/scheduler/scheduling/scheduling_algo_test.go
+++ b/internal/scheduler/scheduling/scheduling_algo_test.go
@@ -395,11 +395,11 @@ func TestSchedule(t *testing.T) {
 			schedulingConfig: testfixtures.TestSchedulingConfig(),
 			executors:        []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")},
 			queues:           []*api.Queue{{Name: "queue1", PriorityFactor: 0.01}, {Name: "queue2", PriorityFactor: 0.01}},
-			queuedJobs:       testfixtures.N16Cpu128GiJobs("queue2", testfixtures.PriorityClass0, 1),
+			queuedJobs:       testfixtures.N32Cpu256GiJobs("queue2", testfixtures.PriorityClass0, 1),
 			scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{
 				0: {
 					0: scheduledJobs{
-						jobs:         testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("queue1", testfixtures.PriorityClass0, 2)),
+						jobs:         testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu16GiJobs("queue1", testfixtures.PriorityClass0, 2)),
 						acknowledged: true,
 					},
 				},
diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go
index 9ae751d4d1f..e7edd7605eb 100644
--- a/internal/scheduler/simulator/simulator.go
+++ b/internal/scheduler/simulator/simulator.go
@@ -596,6 +596,7 @@ func (s *Simulator) handleScheduleEvent(ctx 
*armadacontext.Context) error { sctx, constraints, s.floatingResourceTypes, + s.schedulingConfig.EnablePreferLargeJobOrdering, s.schedulingConfig.GetProtectedFractionOfFairShare(pool), s.schedulingConfig.MaxQueueLookback, txn, diff --git a/internal/scheduler/testfixtures/testfixtures.go b/internal/scheduler/testfixtures/testfixtures.go index e18c5c35073..19d625dc179 100644 --- a/internal/scheduler/testfixtures/testfixtures.go +++ b/internal/scheduler/testfixtures/testfixtures.go @@ -205,6 +205,7 @@ func TestSchedulingConfigWithPools(pools []schedulerconfiguration.PoolConfig) sc IndexedNodeLabels: TestIndexedNodeLabels, IndexedTaints: TestIndexedTaints, WellKnownNodeTypes: TestWellKnownNodeTypes, + EnablePreferLargeJobOrdering: true, DominantResourceFairnessResourcesToConsider: TestResourceNames, ExecutorTimeout: 15 * time.Minute, MaxUnacknowledgedJobsPerExecutor: math.MaxInt, From d5612f5d3b1cdc892aaf50c13600d73bbc735b2f Mon Sep 17 00:00:00 2001 From: Chris Martin Date: Mon, 2 Dec 2024 15:51:35 +0000 Subject: [PATCH 10/12] POC of Market Based Pricing (#4070) * F/chrisma/internal price poc (#293) * proto * price to scheduler * wip * Update README.md * wip * wip * wip --------- Co-authored-by: Chris Martin Co-authored-by: Chris Martin * wip * wip * wip * lint * lint * lint * lint * lint * lint * add spot price * lint * gang preemption test * lint * revert files * fix kind * fixes following testing * revert infra change * revert infra change * fix test * revert run config * typos --------- Co-authored-by: Christopher Martin --- .run/Executor.run.xml | 4 +- README.md | 1 + e2e/setup/kind.yaml | 3 + internal/common/eventutil/eventutil.go | 15 +- .../scheduler/configuration/configuration.go | 1 + internal/scheduler/database/job_repository.go | 2 + .../database/migrations/018_add_price.sql | 1 + internal/scheduler/database/models.go | 1 + internal/scheduler/database/query.sql.go | 26 +- internal/scheduler/database/query/query.sql | 4 +- internal/scheduler/jobdb/comparison.go | 68 +- internal/scheduler/jobdb/job.go | 17 + internal/scheduler/jobdb/job_test.go | 7 +- internal/scheduler/jobdb/jobdb.go | 77 +- internal/scheduler/jobdb/jobdb_test.go | 33 +- internal/scheduler/jobdb/reconciliation.go | 1 + internal/scheduler/metrics/cycle_metrics.go | 12 + internal/scheduler/scheduler_test.go | 8 + .../scheduling/context/scheduling.go | 1 + internal/scheduler/scheduling/jobiteration.go | 75 +- .../scheduler/scheduling/jobiteration_test.go | 20 +- .../scheduling/marketPriorityQueue.go | 214 +++++ ..._driven_preempting_queue_scheduler_test.go | 654 +++++++++++++++ .../scheduling/preempting_queue_scheduler.go | 45 +- .../preempting_queue_scheduler_test.go | 3 + .../scheduling/preemption_description_test.go | 2 +- .../scheduler/scheduling/queue_scheduler.go | 67 +- .../scheduling/queue_scheduler_test.go | 4 +- .../scheduler/scheduling/scheduling_algo.go | 48 +- internal/scheduler/simulator/simulator.go | 2 + .../scheduler/testfixtures/testfixtures.go | 14 + internal/scheduleringester/instructions.go | 6 + .../server/submit/conversion/conversions.go | 7 + .../submit/validation/submit_request.go | 12 + .../submit/validation/submit_request_test.go | 44 + pkg/api/api.swagger.go | 20 +- pkg/api/api.swagger.json | 20 +- pkg/api/submit.pb.go | 784 +++++++++++------ pkg/api/submit.proto | 11 +- pkg/armadaevents/events.pb.go | 787 +++++++++++------- pkg/armadaevents/events.proto | 6 + 41 files changed, 2466 insertions(+), 661 deletions(-) create mode 100644 internal/scheduler/database/migrations/018_add_price.sql 
create mode 100644 internal/scheduler/scheduling/marketPriorityQueue.go create mode 100644 internal/scheduler/scheduling/market_driven_preempting_queue_scheduler_test.go diff --git a/.run/Executor.run.xml b/.run/Executor.run.xml index 1bac201ed06..8fb53f874a5 100644 --- a/.run/Executor.run.xml +++ b/.run/Executor.run.xml @@ -8,7 +8,7 @@ - + @@ -17,4 +17,4 @@ - + \ No newline at end of file diff --git a/README.md b/README.md index 19cedbc4da9..8f01c88945d 100644 --- a/README.md +++ b/README.md @@ -86,3 +86,4 @@ For more information about contributing to Armada see [CONTRIBUTING.md](https:// ## Discussion If you are interested in discussing Armada you can find us on [![slack](https://img.shields.io/badge/slack-armada-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/?redir=%2Farchives%2FC03T9CBCEMC) + diff --git a/e2e/setup/kind.yaml b/e2e/setup/kind.yaml index 747be28e941..a88ce6b4a16 100644 --- a/e2e/setup/kind.yaml +++ b/e2e/setup/kind.yaml @@ -3,6 +3,8 @@ apiVersion: kind.x-k8s.io/v1alpha4 name: armada-test featureGates: "KubeletInUserNamespace": true +networking: + apiServerAddress: 0.0.0.0 nodes: - role: worker image: kindest/node:v1.26.15 @@ -28,3 +30,4 @@ nodes: - containerPort: 6443 # control plane hostPort: 6443 # exposes control plane on localhost:6443 protocol: TCP + diff --git a/internal/common/eventutil/eventutil.go b/internal/common/eventutil/eventutil.go index 6a0263621c4..27fc7ef6f55 100644 --- a/internal/common/eventutil/eventutil.go +++ b/internal/common/eventutil/eventutil.go @@ -160,6 +160,13 @@ func ApiJobFromLogSubmitJob(ownerId string, groups []string, queueName string, j podSpecs = k8sPodSpecs } + var priceInfo *api.ExperimentalPriceInfo + if e.ExperimentalPriceInfo != nil { + priceInfo = &api.ExperimentalPriceInfo{ + BidPrice: e.ExperimentalPriceInfo.BidPrice, + } + } + return &api.Job{ Id: e.JobId, ClientId: e.DeduplicationId, @@ -170,10 +177,10 @@ func ApiJobFromLogSubmitJob(ownerId string, groups []string, queueName string, j Labels: e.ObjectMeta.Labels, Annotations: e.ObjectMeta.Annotations, - K8SIngress: k8sIngresses, - K8SService: k8sServices, - - Priority: float64(e.Priority), + K8SIngress: k8sIngresses, + K8SService: k8sServices, + ExperimentalPriceInfo: priceInfo, + Priority: float64(e.Priority), PodSpec: podSpec, PodSpecs: podSpecs, diff --git a/internal/scheduler/configuration/configuration.go b/internal/scheduler/configuration/configuration.go index 9c0ea3e8bba..4563a7dc24c 100644 --- a/internal/scheduler/configuration/configuration.go +++ b/internal/scheduler/configuration/configuration.go @@ -278,6 +278,7 @@ type PoolConfig struct { Name string `validate:"required"` AwayPools []string ProtectedFractionOfFairShare *float64 + MarketDriven bool } func (sc *SchedulingConfig) GetProtectedFractionOfFairShare(poolName string) float64 { diff --git a/internal/scheduler/database/job_repository.go b/internal/scheduler/database/job_repository.go index 9e84614d329..6e68303611d 100644 --- a/internal/scheduler/database/job_repository.go +++ b/internal/scheduler/database/job_repository.go @@ -114,6 +114,7 @@ func (r *PostgresJobRepository) FetchInitialJobs(ctx *armadacontext.Context) ([] JobSet: row.JobSet, Queue: row.Queue, Priority: row.Priority, + BidPrice: row.BidPrice, Submitted: row.Submitted, Validated: row.Validated, Queued: row.Queued, @@ -228,6 +229,7 @@ func (r *PostgresJobRepository) FetchJobUpdates(ctx *armadacontext.Context, jobS JobSet: row.JobSet, Queue: row.Queue, Priority: row.Priority, + BidPrice: row.BidPrice, Submitted: row.Submitted, 
Validated: row.Validated, Queued: row.Queued, diff --git a/internal/scheduler/database/migrations/018_add_price.sql b/internal/scheduler/database/migrations/018_add_price.sql new file mode 100644 index 00000000000..2d30cc7fc03 --- /dev/null +++ b/internal/scheduler/database/migrations/018_add_price.sql @@ -0,0 +1 @@ +ALTER TABLE jobs ADD COLUMN bid_price double precision NOT NULL DEFAULT 0; diff --git a/internal/scheduler/database/models.go b/internal/scheduler/database/models.go index 5b80e9facff..1da66b44d5c 100644 --- a/internal/scheduler/database/models.go +++ b/internal/scheduler/database/models.go @@ -46,6 +46,7 @@ type Job struct { LastModified time.Time `db:"last_modified"` Validated bool `db:"validated"` Pools []string `db:"pools"` + BidPrice float64 `db:"bid_price"` } type JobRunError struct { diff --git a/internal/scheduler/database/query.sql.go b/internal/scheduler/database/query.sql.go index cfb1fb84a40..07140565c35 100644 --- a/internal/scheduler/database/query.sql.go +++ b/internal/scheduler/database/query.sql.go @@ -387,7 +387,7 @@ func (q *Queries) SelectExecutorUpdateTimes(ctx context.Context) ([]SelectExecut } const selectInitialJobs = `-- name: SelectInitialJobs :many -SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 AND cancelled = 'false' AND succeeded = 'false' and failed = 'false' ORDER BY serial LIMIT $2 +SELECT job_id, job_set, queue, priority, bid_price, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 AND cancelled = 'false' AND succeeded = 'false' and failed = 'false' ORDER BY serial LIMIT $2 ` type SelectInitialJobsParams struct { @@ -400,6 +400,7 @@ type SelectInitialJobsRow struct { JobSet string `db:"job_set"` Queue string `db:"queue"` Priority int64 `db:"priority"` + BidPrice float64 `db:"bid_price"` Submitted int64 `db:"submitted"` Queued bool `db:"queued"` QueuedVersion int32 `db:"queued_version"` @@ -429,6 +430,7 @@ func (q *Queries) SelectInitialJobs(ctx context.Context, arg SelectInitialJobsPa &i.JobSet, &i.Queue, &i.Priority, + &i.BidPrice, &i.Submitted, &i.Queued, &i.QueuedVersion, @@ -511,7 +513,7 @@ func (q *Queries) SelectInitialRuns(ctx context.Context, arg SelectInitialRunsPa } const selectJobsByExecutorAndQueues = `-- name: SelectJobsByExecutorAndQueues :many -SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools +SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools, j.bid_price FROM runs jr JOIN jobs j ON jr.job_id = j.job_id @@ -556,6 +558,7 @@ func (q *Queries) SelectJobsByExecutorAndQueues(ctx context.Context, arg SelectJ &i.LastModified, &i.Validated, &i.Pools, + &i.BidPrice, ); err != nil { return nil, err } @@ -641,7 +644,7 @@ func (q *Queries) SelectLatestJobSerial(ctx context.Context) 
(int64, error) { } const selectLeasedJobsByQueue = `-- name: SelectLeasedJobsByQueue :many -SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools +SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools, j.bid_price FROM runs jr JOIN jobs j ON jr.job_id = j.job_id @@ -685,6 +688,7 @@ func (q *Queries) SelectLeasedJobsByQueue(ctx context.Context, queue []string) ( &i.LastModified, &i.Validated, &i.Pools, + &i.BidPrice, ); err != nil { return nil, err } @@ -697,7 +701,7 @@ func (q *Queries) SelectLeasedJobsByQueue(ctx context.Context, queue []string) ( } const selectNewJobs = `-- name: SelectNewJobs :many -SELECT job_id, job_set, queue, user_id, submitted, groups, priority, queued, queued_version, cancel_requested, cancelled, cancel_by_jobset_requested, succeeded, failed, submit_message, scheduling_info, scheduling_info_version, serial, last_modified, validated, pools FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 +SELECT job_id, job_set, queue, user_id, submitted, groups, priority, queued, queued_version, cancel_requested, cancelled, cancel_by_jobset_requested, succeeded, failed, submit_message, scheduling_info, scheduling_info_version, serial, last_modified, validated, pools, bid_price FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 ` type SelectNewJobsParams struct { @@ -736,6 +740,7 @@ func (q *Queries) SelectNewJobs(ctx context.Context, arg SelectNewJobsParams) ([ &i.LastModified, &i.Validated, &i.Pools, + &i.BidPrice, ); err != nil { return nil, err } @@ -860,7 +865,7 @@ func (q *Queries) SelectNewRunsForJobs(ctx context.Context, arg SelectNewRunsFor } const selectPendingJobsByQueue = `-- name: SelectPendingJobsByQueue :many -SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools +SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools, j.bid_price FROM runs jr JOIN jobs j ON jr.job_id = j.job_id @@ -904,6 +909,7 @@ func (q *Queries) SelectPendingJobsByQueue(ctx context.Context, queue []string) &i.LastModified, &i.Validated, &i.Pools, + &i.BidPrice, ); err != nil { return nil, err } @@ -916,7 +922,7 @@ func (q *Queries) SelectPendingJobsByQueue(ctx context.Context, queue []string) } const selectQueuedJobsByQueue = `-- name: SelectQueuedJobsByQueue :many -SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools +SELECT j.job_id, 
j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools, j.bid_price FROM jobs j WHERE j.queue = ANY($1::text[]) AND j.queued = true @@ -953,6 +959,7 @@ func (q *Queries) SelectQueuedJobsByQueue(ctx context.Context, queue []string) ( &i.LastModified, &i.Validated, &i.Pools, + &i.BidPrice, ); err != nil { return nil, err } @@ -990,7 +997,7 @@ func (q *Queries) SelectRunErrorsById(ctx context.Context, runIds []string) ([]J } const selectRunningJobsByQueue = `-- name: SelectRunningJobsByQueue :many -SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools +SELECT j.job_id, j.job_set, j.queue, j.user_id, j.submitted, j.groups, j.priority, j.queued, j.queued_version, j.cancel_requested, j.cancelled, j.cancel_by_jobset_requested, j.succeeded, j.failed, j.submit_message, j.scheduling_info, j.scheduling_info_version, j.serial, j.last_modified, j.validated, j.pools, j.bid_price FROM runs jr JOIN jobs j ON jr.job_id = j.job_id @@ -1034,6 +1041,7 @@ func (q *Queries) SelectRunningJobsByQueue(ctx context.Context, queue []string) &i.LastModified, &i.Validated, &i.Pools, + &i.BidPrice, ); err != nil { return nil, err } @@ -1046,7 +1054,7 @@ func (q *Queries) SelectRunningJobsByQueue(ctx context.Context, queue []string) } const selectUpdatedJobs = `-- name: SelectUpdatedJobs :many -SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 +SELECT job_id, job_set, queue, priority, bid_price, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2 ` type SelectUpdatedJobsParams struct { @@ -1059,6 +1067,7 @@ type SelectUpdatedJobsRow struct { JobSet string `db:"job_set"` Queue string `db:"queue"` Priority int64 `db:"priority"` + BidPrice float64 `db:"bid_price"` Submitted int64 `db:"submitted"` Queued bool `db:"queued"` QueuedVersion int32 `db:"queued_version"` @@ -1088,6 +1097,7 @@ func (q *Queries) SelectUpdatedJobs(ctx context.Context, arg SelectUpdatedJobsPa &i.JobSet, &i.Queue, &i.Priority, + &i.BidPrice, &i.Submitted, &i.Queued, &i.QueuedVersion, diff --git a/internal/scheduler/database/query/query.sql b/internal/scheduler/database/query/query.sql index ad4da1a55f2..61bfc5284f1 100644 --- a/internal/scheduler/database/query/query.sql +++ b/internal/scheduler/database/query/query.sql @@ -5,10 +5,10 @@ SELECT * FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2; SELECT job_id FROM jobs; -- name: SelectInitialJobs :many -SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 AND cancelled = 'false' AND succeeded = 'false' and failed = 'false' ORDER BY 
serial LIMIT $2; +SELECT job_id, job_set, queue, priority, bid_price, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 AND cancelled = 'false' AND succeeded = 'false' and failed = 'false' ORDER BY serial LIMIT $2; -- name: SelectUpdatedJobs :many -SELECT job_id, job_set, queue, priority, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2; +SELECT job_id, job_set, queue, priority, bid_price, submitted, queued, queued_version, validated, cancel_requested, cancel_by_jobset_requested, cancelled, succeeded, failed, scheduling_info, scheduling_info_version, pools, serial FROM jobs WHERE serial > $1 ORDER BY serial LIMIT $2; -- name: UpdateJobPriorityByJobSet :exec UPDATE jobs SET priority = $1 WHERE job_set = $2 and queue = $3 and cancelled = false and succeeded = false and failed = false; diff --git a/internal/scheduler/jobdb/comparison.go b/internal/scheduler/jobdb/comparison.go index f8876edef3c..126db44af4d 100644 --- a/internal/scheduler/jobdb/comparison.go +++ b/internal/scheduler/jobdb/comparison.go @@ -1,8 +1,9 @@ package jobdb type ( - JobPriorityComparer struct{} - JobIdHasher struct{} + JobPriorityComparer struct{} + MarketJobPriorityComparer struct{} + JobIdHasher struct{} ) func (JobIdHasher) Hash(j *Job) uint32 { @@ -21,6 +22,10 @@ func (JobPriorityComparer) Compare(job, other *Job) int { return SchedulingOrderCompare(job, other) } +func (MarketJobPriorityComparer) Compare(job, other *Job) int { + return MarketSchedulingOrderCompare(job, other) +} + // SchedulingOrderCompare defines the order in which jobs in a particular queue should be scheduled, func (job *Job) SchedulingOrderCompare(other *Job) int { // We need this cast for now to expose this method via an interface. @@ -93,3 +98,62 @@ func SchedulingOrderCompare(job, other *Job) int { } panic("We should never get here. Since we check for job id equality at the top of this function.") } + +func MarketSchedulingOrderCompare(job, other *Job) int { + // Jobs with equal id are always considered equal. + // This ensures at most one job with a particular id can exist in the jobDb. + if job.id == other.id { + return 0 + } + + // Next we sort on bidPrice + if job.bidPrice > other.bidPrice { + return -1 + } else if job.bidPrice < other.bidPrice { + return 1 + } + + // PriorityClassPriority indicates urgency. + // Hence, jobs of higher priorityClassPriority come first. + if job.priorityClass.Priority > other.priorityClass.Priority { + return -1 + } else if job.priorityClass.Priority < other.priorityClass.Priority { + return 1 + } + + // Jobs higher in queue-priority come first. + if job.priority < other.priority { + return -1 + } else if job.priority > other.priority { + return 1 + } + + // If both jobs are active, order by time since the job was scheduled. + // This ensures jobs that have been running for longer are rescheduled first, + // which reduces wasted compute time when preempting. 
+ jobIsActive := job.activeRun != nil && !job.activeRun.InTerminalState() + otherIsActive := other.activeRun != nil && !other.activeRun.InTerminalState() + if jobIsActive && otherIsActive { + if job.activeRunTimestamp < other.activeRunTimestamp { + return -1 + } else if job.activeRunTimestamp > other.activeRunTimestamp { + return 1 + } + } + + // Jobs that have been queuing for longer are scheduled first. + if job.submittedTime < other.submittedTime { + return -1 + } else if job.submittedTime > other.submittedTime { + return 1 + } + + // Tie-break by jobId, which must be unique. + // This ensures there is a total order between jobs, i.e., no jobs are equal from an ordering point of view. + if job.id < other.id { + return -1 + } else if job.id > other.id { + return 1 + } + panic("We should never get here. Since we check for job id equality at the top of this function.") +} diff --git a/internal/scheduler/jobdb/job.go b/internal/scheduler/jobdb/job.go index f9f295e8d05..a1b264bb62a 100644 --- a/internal/scheduler/jobdb/job.go +++ b/internal/scheduler/jobdb/job.go @@ -32,6 +32,8 @@ type Job struct { jobSet string // Per-queue priority of this job. priority uint32 + // BidPrice the user is willing to pay to have this job scheduled + bidPrice float64 // Requested per queue priority of this job. // This is used when syncing the postgres database with the scheduler-internal database. requestedPriority uint32 @@ -300,6 +302,9 @@ func (job *Job) Equal(other *Job) bool { if job.priority != other.priority { return false } + if job.bidPrice != other.bidPrice { + return false + } if job.requestedPriority != other.requestedPriority { return false } @@ -378,6 +383,11 @@ func (job *Job) Priority() uint32 { return job.priority } +// BidPrice returns the bidPrice of the job. +func (job *Job) BidPrice() float64 { + return job.bidPrice +} + // PriorityClass returns the priority class of the job. func (job *Job) PriorityClass() types.PriorityClass { return job.priorityClass @@ -413,6 +423,13 @@ func (job *Job) WithPriority(priority uint32) *Job { return j } +// WithBidPrice returns a copy of the job with the bidPrice updated. +func (job *Job) WithBidPrice(price float64) *Job { + j := copyJob(*job) + j.bidPrice = price + return j +} + // WithPools returns a copy of the job with the pools updated. 
func (job *Job) WithPools(pools []string) *Job { j := copyJob(*job) diff --git a/internal/scheduler/jobdb/job_test.go b/internal/scheduler/jobdb/job_test.go index 0cc5ef8bc48..49f195112b5 100644 --- a/internal/scheduler/jobdb/job_test.go +++ b/internal/scheduler/jobdb/job_test.go @@ -39,6 +39,7 @@ var baseJob, _ = jobDb.NewJob( "test-jobSet", "test-queue", 2, + 0.0, jobSchedulingInfo, true, 0, @@ -321,10 +322,10 @@ func TestJob_TestWithCreated(t *testing.T) { } func TestJob_DeepCopy(t *testing.T) { - original, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, jobSchedulingInfo, true, 0, false, false, false, 3, false, []string{}) + original, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, 0.0, jobSchedulingInfo, true, 0, false, false, false, 3, false, []string{}) assert.Nil(t, err) original = original.WithUpdatedRun(baseJobRun.DeepCopy()) - expected, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, jobSchedulingInfo, true, 0, false, false, false, 3, false, []string{}) + expected, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, 0.0, jobSchedulingInfo, true, 0, false, false, false, 3, false, []string{}) assert.Nil(t, err) expected = expected.WithUpdatedRun(baseJobRun.DeepCopy()) @@ -414,7 +415,7 @@ func TestJobSchedulingInfoFieldsInitialised(t *testing.T) { assert.Nil(t, infoWithNilFields.GetPodRequirements().NodeSelector) assert.Nil(t, infoWithNilFields.GetPodRequirements().Annotations) - job, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, infoWithNilFieldsCopy, true, 0, false, false, false, 3, false, []string{}) + job, err := jobDb.NewJob("test-job", "test-jobSet", "test-queue", 2, 0.0, infoWithNilFieldsCopy, true, 0, false, false, false, 3, false, []string{}) assert.Nil(t, err) assert.NotNil(t, job.NodeSelector()) assert.NotNil(t, job.Annotations()) diff --git a/internal/scheduler/jobdb/jobdb.go b/internal/scheduler/jobdb/jobdb.go index 2b408304e38..9252aef11ba 100644 --- a/internal/scheduler/jobdb/jobdb.go +++ b/internal/scheduler/jobdb/jobdb.go @@ -19,17 +19,58 @@ import ( "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) +type JobSortOrder int + +const ( + PriceOrder JobSortOrder = iota + FairShareOrder +) + type JobIterator interface { Next() (*Job, bool) Done() bool } +type jobQueue struct { + fairShareQueue immutable.SortedSet[*Job] + marketQueue immutable.SortedSet[*Job] +} + +func emptyJobQueue() jobQueue { + return jobQueue{ + fairShareQueue: immutable.NewSortedSet[*Job](JobPriorityComparer{}), + marketQueue: immutable.NewSortedSet[*Job](MarketJobPriorityComparer{}), + } +} + +func (jq jobQueue) add(j *Job) jobQueue { + return jobQueue{ + fairShareQueue: jq.fairShareQueue.Add(j), + marketQueue: jq.marketQueue.Add(j), + } +} + +func (jq jobQueue) delete(j *Job) jobQueue { + return jobQueue{ + fairShareQueue: jq.fairShareQueue.Delete(j), + marketQueue: jq.marketQueue.Delete(j), + } +} + +func (jq jobQueue) has(j *Job) bool { + return jq.fairShareQueue.Has(j) +} + +func (jq jobQueue) len() int { + return jq.marketQueue.Len() +} + var emptyList = immutable.NewSortedSet[*Job](JobPriorityComparer{}) type JobDb struct { jobsById *immutable.Map[string, *Job] jobsByRunId *immutable.Map[string, string] - jobsByQueue map[string]immutable.SortedSet[*Job] + jobsByQueue map[string]jobQueue unvalidatedJobs *immutable.Set[*Job] // Configured priority classes. 
	priorityClasses map[string]types.PriorityClass
@@ -92,7 +133,7 @@ func NewJobDbWithSchedulingKeyGenerator(
 	return &JobDb{
 		jobsById:        immutable.NewMap[string, *Job](nil),
 		jobsByRunId:     immutable.NewMap[string, string](nil),
-		jobsByQueue:     map[string]immutable.SortedSet[*Job]{},
+		jobsByQueue:     map[string]jobQueue{},
 		unvalidatedJobs: &unvalidatedJobs,
 		priorityClasses: priorityClasses,
 		defaultPriorityClass: defaultPriorityClass,
@@ -134,6 +175,7 @@ func (jobDb *JobDb) NewJob(
 	jobSet string,
 	queue string,
 	priority uint32,
+	bidPrice float64,
 	schedulingInfo *schedulerobjects.JobSchedulingInfo,
 	queued bool,
 	queuedVersion int32,
@@ -157,6 +199,7 @@ func (jobDb *JobDb) NewJob(
 		queue:             jobDb.stringInterner.Intern(queue),
 		jobSet:            jobDb.stringInterner.Intern(jobSet),
 		priority:          priority,
+		bidPrice:          bidPrice,
 		queued:            queued,
 		queuedVersion:     queuedVersion,
 		requestedPriority: priority,
@@ -257,7 +300,7 @@ type Txn struct {
 	// Note that a job may have multiple runs, i.e., the mapping is many-to-one.
 	jobsByRunId *immutable.Map[string, string]
 	// Queued jobs for each queue. Stored in the order in which they should be scheduled.
-	jobsByQueue map[string]immutable.SortedSet[*Job]
+	jobsByQueue map[string]jobQueue
 	// Jobs that require submit checking
 	unvalidatedJobs *immutable.Set[*Job]
 	// The jobDb from which this transaction was created.
@@ -302,7 +345,7 @@ func (txn *Txn) Assert(assertOnlyActiveJobs bool) error {
 		if job.Queued() {
 			if queue, ok := txn.jobsByQueue[job.queue]; !ok {
 				return errors.Errorf("jobDb contains queued job %s but there is no sorted set for this queue", job)
-			} else if !queue.Has(job) {
+			} else if !queue.has(job) {
 				return errors.Errorf("jobDb contains queued job %s but this job is not in the queue sorted set", job)
 			}
 		}
@@ -315,7 +358,7 @@ func (txn *Txn) Assert(assertOnlyActiveJobs bool) error {
 		}
 	}
 	for queue, queueIt := range txn.jobsByQueue {
-		it := queueIt.Iterator()
+		it := queueIt.fairShareQueue.Iterator()
 		for {
 			job, ok := it.Next()
 			if !ok {
@@ -391,7 +434,7 @@ func (txn *Txn) Upsert(jobs []*Job) error {
 		if ok {
 			existingQueue, ok := txn.jobsByQueue[existingJob.queue]
 			if ok {
-				txn.jobsByQueue[existingJob.queue] = existingQueue.Delete(existingJob)
+				txn.jobsByQueue[existingJob.queue] = existingQueue.delete(existingJob)
 			}
 			if !existingJob.Validated() {
 				newUnvalidatedJobs := txn.unvalidatedJobs.Delete(existingJob)
@@ -449,10 +492,9 @@ func (txn *Txn) Upsert(jobs []*Job) error {
 		if job.Queued() {
 			newQueue, ok := txn.jobsByQueue[job.queue]
 			if !ok {
-				q := emptyList
-				newQueue = q
+				newQueue = emptyJobQueue()
 			}
-			newQueue = newQueue.Add(job)
+			newQueue = newQueue.add(job)
 			txn.jobsByQueue[job.queue] = newQueue
 		}
 	}
@@ -493,17 +535,20 @@ func (txn *Txn) HasQueuedJobs(queue string) bool {
 	if !ok {
 		return false
 	}
-	return queuedJobs.Len() > 0
+	return queuedJobs.len() > 0
 }
 
-// QueuedJobs returns true if the queue has any jobs in the running state or false otherwise
-func (txn *Txn) QueuedJobs(queue string) JobIterator {
+// QueuedJobs returns an iterator over all queued jobs, ordered according to the given sort order.
+func (txn *Txn) QueuedJobs(queue string, sortOrder JobSortOrder) JobIterator {
 	jobQueue, ok := txn.jobsByQueue[queue]
-	if ok {
-		return jobQueue.Iterator()
-	} else {
+	if !ok {
 		return emptyList.Iterator()
 	}
+	if sortOrder == FairShareOrder {
+		return jobQueue.fairShareQueue.Iterator()
+	} else {
+		return jobQueue.marketQueue.Iterator()
+	}
 }
 
 // UnvalidatedJobs returns an iterator for jobs that have not yet been validated
@@ -545,7 +590,7 @@ func (txn *Txn) delete(jobId string) {
 	}
 	queue, 
ok := txn.jobsByQueue[job.queue] if ok { - newQueue := queue.Delete(job) + newQueue := queue.delete(job) txn.jobsByQueue[job.queue] = newQueue } newUnvalidatedJobs := txn.unvalidatedJobs.Delete(job) diff --git a/internal/scheduler/jobdb/jobdb_test.go b/internal/scheduler/jobdb/jobdb_test.go index e8a88ab7ab0..125d8efdf8f 100644 --- a/internal/scheduler/jobdb/jobdb_test.go +++ b/internal/scheduler/jobdb/jobdb_test.go @@ -129,6 +129,35 @@ func TestJobDb_TestHasQueuedJobs(t *testing.T) { assert.False(t, txn.HasQueuedJobs("non-existent-queue")) } +func TestJobDb_TestQueuedJobsWithPriceOrdering(t *testing.T) { + a := newJob().WithQueued(true).WithBidPrice(100.0).WithSubmittedTime(4) + b := newJob().WithQueued(true).WithBidPrice(99.0).WithSubmittedTime(3) + c := newJob().WithQueued(true).WithBidPrice(101.0).WithSubmittedTime(2) + d := newJob().WithQueued(true).WithBidPrice(100.0).WithSubmittedTime(1) + + txn := jobDb.WriteTxn() + err := txn.Upsert([]*Job{a, b, c, d}) + require.NoError(t, err) + + iter := txn.QueuedJobs("test-queue", PriceOrder) + + job, ok := iter.Next() + require.True(t, ok) + assert.Equal(t, c, job) + + job, ok = iter.Next() + require.True(t, ok) + assert.Equal(t, d, job) + + job, ok = iter.Next() + require.True(t, ok) + assert.Equal(t, a, job) + + job, ok = iter.Next() + require.True(t, ok) + assert.Equal(t, b, job) +} + func TestJobDb_TestQueuedJobs(t *testing.T) { jobDb := NewTestJobDb() jobs := make([]*Job, 10) @@ -145,7 +174,7 @@ func TestJobDb_TestQueuedJobs(t *testing.T) { require.NoError(t, err) collect := func() []*Job { retrieved := make([]*Job, 0) - iter := txn.QueuedJobs(jobs[0].Queue()) + iter := txn.QueuedJobs(jobs[0].Queue(), FairShareOrder) for !iter.Done() { j, _ := iter.Next() retrieved = append(retrieved, j) @@ -268,7 +297,7 @@ func TestJobDb_SchedulingKeyIsPopulated(t *testing.T) { }, } jobDb := NewTestJobDb() - job, err := jobDb.NewJob("jobId", "jobSet", "queue", 1, jobSchedulingInfo, false, 0, false, false, false, 2, false, []string{}) + job, err := jobDb.NewJob("jobId", "jobSet", "queue", 1, 0.0, jobSchedulingInfo, false, 0, false, false, false, 2, false, []string{}) assert.Nil(t, err) assert.Equal(t, SchedulingKeyFromJob(jobDb.schedulingKeyGenerator, job), job.SchedulingKey()) } diff --git a/internal/scheduler/jobdb/reconciliation.go b/internal/scheduler/jobdb/reconciliation.go index 37a8c0e3193..0d275b0072a 100644 --- a/internal/scheduler/jobdb/reconciliation.go +++ b/internal/scheduler/jobdb/reconciliation.go @@ -260,6 +260,7 @@ func (jobDb *JobDb) schedulerJobFromDatabaseJob(dbJob *database.Job) (*Job, erro dbJob.JobSet, dbJob.Queue, uint32(dbJob.Priority), + dbJob.BidPrice, schedulingInfo, dbJob.Queued, dbJob.QueuedVersion, diff --git a/internal/scheduler/metrics/cycle_metrics.go b/internal/scheduler/metrics/cycle_metrics.go index c2f32ad60e7..5c03b07d075 100644 --- a/internal/scheduler/metrics/cycle_metrics.go +++ b/internal/scheduler/metrics/cycle_metrics.go @@ -37,6 +37,7 @@ type cycleMetrics struct { loopNumber *prometheus.GaugeVec evictedJobs *prometheus.GaugeVec evictedResources *prometheus.GaugeVec + spotPrice *prometheus.GaugeVec allResettableMetrics []resettableMetric } @@ -193,6 +194,14 @@ func newCycleMetrics() *cycleMetrics { poolQueueAndResourceLabels, ) + spotPrice := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: prefix + "spot_price", + Help: "spot price for given pool", + }, + poolLabels, + ) + return &cycleMetrics{ leaderMetricsEnabled: true, scheduledJobs: scheduledJobs, @@ -213,6 +222,7 @@ func newCycleMetrics() 
*cycleMetrics { loopNumber: loopNumber, evictedJobs: evictedJobs, evictedResources: evictedResources, + spotPrice: spotPrice, allResettableMetrics: []resettableMetric{ scheduledJobs, premptedJobs, @@ -231,6 +241,7 @@ func newCycleMetrics() *cycleMetrics { loopNumber, evictedJobs, evictedResources, + spotPrice, }, reconciliationCycleTime: reconciliationCycleTime, } @@ -277,6 +288,7 @@ func (m *cycleMetrics) ReportSchedulerResult(result scheduling.SchedulerResult) m.cappedDemand.WithLabelValues(pool, queue).Set(cappedDemand) } m.fairnessError.WithLabelValues(pool).Set(schedContext.FairnessError()) + m.spotPrice.WithLabelValues(pool).Set(schedContext.SpotPrice) } for _, jobCtx := range result.ScheduledJobs { diff --git a/internal/scheduler/scheduler_test.go b/internal/scheduler/scheduler_test.go index 95dbab6fc8e..f369680d24c 100644 --- a/internal/scheduler/scheduler_test.go +++ b/internal/scheduler/scheduler_test.go @@ -122,6 +122,7 @@ var queuedJob = testfixtures.NewJob( "testJobset", "testQueue", uint32(10), + 0.0, schedulingInfo, true, 0, @@ -137,6 +138,7 @@ var leasedJob = testfixtures.NewJob( "testJobset", "testQueue", 0, + 0.0, schedulingInfo, false, 1, @@ -152,6 +154,7 @@ var preemptibleLeasedJob = testfixtures.NewJob( "testJobset", "testQueue", 0, + 0.0, preemptibleSchedulingInfo, false, 1, @@ -167,6 +170,7 @@ var cancelledJob = testfixtures.NewJob( "testJobset", "testQueue", 0, + 0.0, schedulingInfo, false, 1, @@ -182,6 +186,7 @@ var returnedOnceLeasedJob = testfixtures.NewJob( "testJobset", "testQueue", uint32(10), + 0.0, schedulingInfo, false, 3, @@ -240,6 +245,7 @@ var leasedFailFastJob = testfixtures.NewJob( "testJobset", "testQueue", uint32(10), + 0.0, failFastSchedulingInfo, false, 1, @@ -262,6 +268,7 @@ var ( "testJobset", "testQueue", uint32(10), + 0.0, schedulingInfo, true, 2, @@ -1783,6 +1790,7 @@ func jobDbJobFromDbJob(resourceListFactory *internaltypes.ResourceListFactory, j job.JobSet, job.Queue, uint32(job.Priority), + job.BidPrice, &schedulingInfo, job.Queued, job.QueuedVersion, diff --git a/internal/scheduler/scheduling/context/scheduling.go b/internal/scheduler/scheduling/context/scheduling.go index 284f543cfd9..8e62158f65c 100644 --- a/internal/scheduler/scheduling/context/scheduling.go +++ b/internal/scheduler/scheduling/context/scheduling.go @@ -60,6 +60,7 @@ type SchedulingContext struct { // Used to immediately reject new jobs with identical reqirements. // Maps to the JobSchedulingContext of a previous job attempted to schedule with the same key. UnfeasibleSchedulingKeys map[schedulerobjects.SchedulingKey]*JobSchedulingContext + SpotPrice float64 } func NewSchedulingContext( diff --git a/internal/scheduler/scheduling/jobiteration.go b/internal/scheduler/scheduling/jobiteration.go index 88a3f8e7818..b5f5476ed87 100644 --- a/internal/scheduler/scheduling/jobiteration.go +++ b/internal/scheduler/scheduling/jobiteration.go @@ -16,7 +16,7 @@ type JobContextIterator interface { } type JobRepository interface { - QueuedJobs(queueName string) jobdb.JobIterator + QueuedJobs(queueName string, order jobdb.JobSortOrder) jobdb.JobIterator GetById(id string) *jobdb.Job } @@ -44,15 +44,17 @@ type InMemoryJobRepository struct { jctxsByQueue map[string][]*schedulercontext.JobSchedulingContext jctxsById map[string]*schedulercontext.JobSchedulingContext currentPool string + sortOrder func(a, b *jobdb.Job) int // Protects the above fields. 
mu sync.Mutex } -func NewInMemoryJobRepository(pool string) *InMemoryJobRepository { +func NewInMemoryJobRepository(pool string, sortOrder func(a, b *jobdb.Job) int) *InMemoryJobRepository { return &InMemoryJobRepository{ currentPool: pool, jctxsByQueue: make(map[string][]*schedulercontext.JobSchedulingContext), jctxsById: make(map[string]*schedulercontext.JobSchedulingContext), + sortOrder: sortOrder, } } @@ -77,7 +79,7 @@ func (repo *InMemoryJobRepository) EnqueueMany(jctxs []*schedulercontext.JobSche // sortQueue sorts jobs in a specified queue by the order in which they should be scheduled. func (repo *InMemoryJobRepository) sortQueue(queue string) { slices.SortFunc(repo.jctxsByQueue[queue], func(a, b *schedulercontext.JobSchedulingContext) int { - return a.Job.SchedulingOrderCompare(b.Job) + return repo.sortOrder(a.Job, b.Job) }) } @@ -115,9 +117,9 @@ type QueuedJobsIterator struct { ctx *armadacontext.Context } -func NewQueuedJobsIterator(ctx *armadacontext.Context, queue string, pool string, repo JobRepository) *QueuedJobsIterator { +func NewQueuedJobsIterator(ctx *armadacontext.Context, queue string, pool string, repo JobRepository, order jobdb.JobSortOrder) *QueuedJobsIterator { return &QueuedJobsIterator{ - jobIter: repo.QueuedJobs(queue), + jobIter: repo.QueuedJobs(queue, order), pool: pool, ctx: ctx, } @@ -167,3 +169,66 @@ func (it *MultiJobsIterator) Next() (*schedulercontext.JobSchedulingContext, err return v, err } } + +// MarketDrivenMultiJobsIterator combines two iterators by price +type MarketDrivenMultiJobsIterator struct { + it1 JobContextIterator + it2 JobContextIterator + + // TODO: ideally we add peek() to JobContextIterator and remove these + it1Value *schedulercontext.JobSchedulingContext + it2Value *schedulercontext.JobSchedulingContext +} + +func NewMarketDrivenMultiJobsIterator(it1, it2 JobContextIterator) *MarketDrivenMultiJobsIterator { + return &MarketDrivenMultiJobsIterator{ + it1: it1, + it2: it2, + } +} + +func (it *MarketDrivenMultiJobsIterator) Next() (*schedulercontext.JobSchedulingContext, error) { + if it.it1Value == nil { + j, err := it.it1.Next() + if err != nil { + return nil, err + } + it.it1Value = j + } + + if it.it2Value == nil { + j, err := it.it2.Next() + if err != nil { + return nil, err + } + it.it2Value = j + } + + j1 := it.it1Value + j2 := it.it2Value + // Both iterators active. 
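+	// Yield whichever head sorts first under MarketSchedulingOrderCompare; the other stays cached for the next call.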
+ if it.it1Value != nil && j2 != nil { + if (jobdb.MarketSchedulingOrderCompare(j1.Job, j2.Job)) < 0 { + it.it1Value = nil + return j1, nil + } else { + it.it2Value = nil + return j2, nil + } + } + + // Only first iterator has job + if j1 != nil { + it.it1Value = nil + return j1, nil + } + + // Only second iterator has job + if j2 != nil { + it.it2Value = nil + return j2, nil + } + + // If we get to here then both iterators exhausted + return nil, nil +} diff --git a/internal/scheduler/scheduling/jobiteration_test.go b/internal/scheduler/scheduling/jobiteration_test.go index 060cb6d70bb..f74444ab1b2 100644 --- a/internal/scheduler/scheduling/jobiteration_test.go +++ b/internal/scheduler/scheduling/jobiteration_test.go @@ -29,7 +29,7 @@ func TestInMemoryJobRepository(t *testing.T) { for i, job := range jobs { jctxs[i] = &schedulercontext.JobSchedulingContext{Job: job, KubernetesResourceRequirements: job.KubernetesResourceRequirements()} } - repo := NewInMemoryJobRepository(testfixtures.TestPool) + repo := NewInMemoryJobRepository(testfixtures.TestPool, jobdb.SchedulingOrderCompare) repo.EnqueueMany(jctxs) expected := []*jobdb.Job{ jobs[4], jobs[1], jobs[2], jobs[0], jobs[5], jobs[3], @@ -64,7 +64,7 @@ func TestMultiJobsIterator_TwoQueues(t *testing.T) { ctx := armadacontext.Background() its := make([]JobContextIterator, 3) for i, queue := range []string{"A", "B", "C"} { - it := NewQueuedJobsIterator(ctx, queue, testfixtures.TestPool, repo) + it := NewQueuedJobsIterator(ctx, queue, testfixtures.TestPool, repo, jobdb.FairShareOrder) its[i] = it } it := NewMultiJobsIterator(its...) @@ -93,7 +93,7 @@ func TestQueuedJobsIterator_OneQueue(t *testing.T) { expected = append(expected, job.Id()) } ctx := armadacontext.Background() - it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo) + it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo, jobdb.FairShareOrder) actual := make([]string, 0) for { jctx, err := it.Next() @@ -115,7 +115,7 @@ func TestQueuedJobsIterator_ExceedsBufferSize(t *testing.T) { expected = append(expected, job.Id()) } ctx := armadacontext.Background() - it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo) + it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo, jobdb.FairShareOrder) actual := make([]string, 0) for { jctx, err := it.Next() @@ -137,7 +137,7 @@ func TestQueuedJobsIterator_ManyJobs(t *testing.T) { expected = append(expected, job.Id()) } ctx := armadacontext.Background() - it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo) + it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo, jobdb.FairShareOrder) actual := make([]string, 0) for { jctx, err := it.Next() @@ -164,7 +164,7 @@ func TestCreateQueuedJobsIterator_TwoQueues(t *testing.T) { repo.Enqueue(job) } ctx := armadacontext.Background() - it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo) + it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo, jobdb.FairShareOrder) actual := make([]string, 0) for { jctx, err := it.Next() @@ -187,7 +187,7 @@ func TestCreateQueuedJobsIterator_RespectsTimeout(t *testing.T) { ctx, cancel := armadacontext.WithTimeout(armadacontext.Background(), time.Millisecond) time.Sleep(20 * time.Millisecond) defer cancel() - it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo) + it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo, jobdb.FairShareOrder) job, err := it.Next() assert.Nil(t, job) assert.ErrorIs(t, err, context.DeadlineExceeded) @@ -205,7 +205,7 @@ func 
TestCreateQueuedJobsIterator_NilOnEmpty(t *testing.T) { repo.Enqueue(job) } ctx := armadacontext.Background() - it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo) + it := NewQueuedJobsIterator(ctx, "A", testfixtures.TestPool, repo, jobdb.FairShareOrder) for job, err := it.Next(); job != nil; job, err = it.Next() { require.NoError(t, err) } @@ -237,7 +237,7 @@ type mockJobRepository struct { jobsById map[string]*jobdb.Job } -func (repo *mockJobRepository) QueuedJobs(queueName string) jobdb.JobIterator { +func (repo *mockJobRepository) QueuedJobs(queueName string, _ jobdb.JobSortOrder) jobdb.JobIterator { q := repo.jobsByQueue[queueName] return &mockJobIterator{jobs: q} } @@ -266,7 +266,7 @@ func (repo *mockJobRepository) Enqueue(job *jobdb.Job) { } func (repo *mockJobRepository) GetJobIterator(ctx *armadacontext.Context, queue string) JobContextIterator { - return NewQueuedJobsIterator(ctx, queue, testfixtures.TestPool, repo) + return NewQueuedJobsIterator(ctx, queue, testfixtures.TestPool, repo, jobdb.FairShareOrder) } func jobFromPodSpec(queue string, req *schedulerobjects.PodRequirements) *jobdb.Job { diff --git a/internal/scheduler/scheduling/marketPriorityQueue.go b/internal/scheduler/scheduling/marketPriorityQueue.go new file mode 100644 index 00000000000..cf3cd866bc1 --- /dev/null +++ b/internal/scheduler/scheduling/marketPriorityQueue.go @@ -0,0 +1,214 @@ +package scheduling + +import ( + "container/heap" + "time" + + "github.com/armadaproject/armada/internal/scheduler/internaltypes" + schedulercontext "github.com/armadaproject/armada/internal/scheduler/scheduling/context" + "github.com/armadaproject/armada/internal/scheduler/scheduling/fairness" +) + +type MarketIteratorPQ struct { + items []*MarketIteratorPQItem +} + +type MarketBasedCandidateGangIterator struct { + pool string + queueRepository fairness.QueueRepository + // If true, this iterator only yields gangs where all jobs are evicted. + onlyYieldEvicted bool + // If, e.g., onlyYieldEvictedByQueue["A"] is true, + // this iterator only yields gangs where all jobs are evicted for queue A. + onlyYieldEvictedByQueue map[string]bool + // Priority queue containing per-queue iterators. + // Determines the order in which queues are processed. + pq MarketIteratorPQ +} + +func NewMarketCandidateGangIterator( + pool string, + queueRepository fairness.QueueRepository, + iteratorsByQueue map[string]*QueuedGangIterator, +) (*MarketBasedCandidateGangIterator, error) { + it := &MarketBasedCandidateGangIterator{ + pool: pool, + queueRepository: queueRepository, + onlyYieldEvictedByQueue: make(map[string]bool), + pq: MarketIteratorPQ{ + items: make([]*MarketIteratorPQItem, 0, len(iteratorsByQueue)), + }, + } + for queue, queueIt := range iteratorsByQueue { + if _, err := it.updateAndPushPQItem(it.newPQItem(queue, queueIt)); err != nil { + return nil, err + } + } + return it, nil +} + +func (it *MarketBasedCandidateGangIterator) newPQItem(queue string, queueIt *QueuedGangIterator) *MarketIteratorPQItem { + return &MarketIteratorPQItem{ + queue: queue, + it: queueIt, + } +} + +func (it *MarketBasedCandidateGangIterator) GetAllocationForQueue(queue string) (internaltypes.ResourceList, bool) { + q, ok := it.queueRepository.GetQueue(queue) + if !ok { + return internaltypes.ResourceList{}, false + } + return q.GetAllocation(), true +} + +// Clear removes the first item in the iterator. +// If it.onlyYieldEvicted is true, any consecutive non-evicted jobs are also removed. 
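+// Clearing re-peeks the underlying queue iterator and re-inserts the queue's item if more gangs remain.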
+func (it *MarketBasedCandidateGangIterator) Clear() error { + if it.pq.Len() == 0 { + return nil + } + item := heap.Pop(&it.pq).(*MarketIteratorPQItem) + if err := item.it.Clear(); err != nil { + return err + } + if _, err := it.updateAndPushPQItem(item); err != nil { + return err + } + + // If set to only yield evicted gangs, drop any queues for which the next gang is non-evicted here. + // We assume here that all evicted jobs appear before non-evicted jobs in the queue. + // Hence, it's safe to drop a queue if the first job is non-evicted. + if it.onlyYieldEvicted { + for it.pq.Len() > 0 && !it.pq.items[0].gctx.AllJobsEvicted { + heap.Pop(&it.pq) + } + } else { + // Same check as above on a per-queue basis. + for it.pq.Len() > 0 && it.onlyYieldEvictedByQueue[it.pq.items[0].gctx.Queue] && !it.pq.items[0].gctx.AllJobsEvicted { + heap.Pop(&it.pq) + } + } + return nil +} + +func (it *MarketBasedCandidateGangIterator) Peek() (*schedulercontext.GangSchedulingContext, float64, error) { + if it.pq.Len() == 0 { + // No queued jobs left. + return nil, 0.0, nil + } + first := it.pq.items[0] + + // The second return value here is the queue cost which we don't have here. + // This is only used for metrics so should be fine. + return first.gctx, 0.0, nil +} + +func (it *MarketBasedCandidateGangIterator) updateAndPushPQItem(item *MarketIteratorPQItem) (bool, error) { + if err := it.updatePQItem(item); err != nil { + return false, err + } + if item.gctx == nil { + return false, nil + } + if it.onlyYieldEvicted && !item.gctx.AllJobsEvicted { + return false, nil + } + if it.onlyYieldEvictedByQueue[item.gctx.Queue] && !item.gctx.AllJobsEvicted { + return false, nil + } + heap.Push(&it.pq, item) + return true, nil +} + +func (it *MarketBasedCandidateGangIterator) updatePQItem(item *MarketIteratorPQItem) error { + item.gctx = nil + item.price = 0 + gctx, err := item.it.Peek() + if err != nil { + return err + } + if gctx == nil { + return nil + } + + job := gctx.JobSchedulingContexts[0].Job + item.gctx = gctx + item.price = job.BidPrice() + if !job.Queued() && job.LatestRun() != nil { + item.runtime = time.Now().UnixNano() - job.LatestRun().Created() + } else { + item.runtime = 0 + } + item.submittedTime = job.SubmitTime().UnixNano() + + return nil +} + +func (it *MarketBasedCandidateGangIterator) OnlyYieldEvicted() { + it.onlyYieldEvicted = true +} + +func (it *MarketBasedCandidateGangIterator) OnlyYieldEvictedForQueue(queue string) { + it.onlyYieldEvictedByQueue[queue] = true +} + +type MarketIteratorPQItem struct { + queue string + price float64 + runtime int64 + submittedTime int64 + // Most recent value produced by the iterator. + // Cached here to avoid repeating scheduling checks unnecessarily. + gctx *schedulercontext.GangSchedulingContext + // Iterator for this queue. + it *QueuedGangIterator + // The index of the item in the heap. + // maintained by the heap.Interface methods. + index int +} + +func (pq *MarketIteratorPQ) Len() int { return len(pq.items) } + +func (pq *MarketIteratorPQ) Less(i, j int) bool { + // First by price + if pq.items[i].price != pq.items[j].price { + return pq.items[i].price > pq.items[j].price + } + + // Then by runtime (highest first) + if pq.items[i].runtime != pq.items[j].runtime { + return pq.items[i].runtime > pq.items[j].runtime + } + + // Then by submitted time (lowest first) + if pq.items[i].submittedTime != pq.items[j].submittedTime { + return pq.items[i].submittedTime < pq.items[j].submittedTime + } + + // Tie-break by queue name. 
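+	// Queue names are unique, so Less yields a strict total order over items.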
+	return pq.items[i].queue < pq.items[j].queue
+}
+
+func (pq *MarketIteratorPQ) Swap(i, j int) {
+	pq.items[i], pq.items[j] = pq.items[j], pq.items[i]
+	pq.items[i].index = i
+	pq.items[j].index = j
+}
+
+func (pq *MarketIteratorPQ) Push(x any) {
+	n := pq.Len()
+	item := x.(*MarketIteratorPQItem)
+	item.index = n
+	pq.items = append(pq.items, item)
+}
+
+func (pq *MarketIteratorPQ) Pop() any {
+	old := pq.items
+	n := len(old)
+	item := old[n-1]
+	old[n-1] = nil // avoid memory leak
+	item.index = -1 // for safety
+	pq.items = old[0 : n-1]
+	return item
+}
diff --git a/internal/scheduler/scheduling/market_driven_preempting_queue_scheduler_test.go b/internal/scheduler/scheduling/market_driven_preempting_queue_scheduler_test.go
new file mode 100644
index 00000000000..2a63f5ec6ea
--- /dev/null
+++ b/internal/scheduler/scheduling/market_driven_preempting_queue_scheduler_test.go
@@ -0,0 +1,654 @@
+package scheduling
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/maps"
+	"golang.org/x/exp/slices"
+	"golang.org/x/time/rate"
+
+	"github.com/armadaproject/armada/internal/common/armadacontext"
+	armadamaps "github.com/armadaproject/armada/internal/common/maps"
+	armadaslices "github.com/armadaproject/armada/internal/common/slices"
+	"github.com/armadaproject/armada/internal/common/stringinterner"
+	"github.com/armadaproject/armada/internal/common/types"
+	"github.com/armadaproject/armada/internal/scheduler/configuration"
+	"github.com/armadaproject/armada/internal/scheduler/internaltypes"
+	"github.com/armadaproject/armada/internal/scheduler/jobdb"
+	"github.com/armadaproject/armada/internal/scheduler/nodedb"
+	"github.com/armadaproject/armada/internal/scheduler/schedulerobjects"
+	schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/scheduling/constraints"
+	"github.com/armadaproject/armada/internal/scheduler/scheduling/context"
+	"github.com/armadaproject/armada/internal/scheduler/scheduling/fairness"
+	"github.com/armadaproject/armada/internal/scheduler/testfixtures"
+	"github.com/armadaproject/armada/pkg/api"
+)
+
+func TestMarketDrivenPreemptingQueueScheduler(t *testing.T) {
+	type SchedulingRound struct {
+		// Map from queue name to the jobs submitted for that queue.
+		JobsByQueue map[string][]*jobdb.Job
+		// For each queue, indices of jobs expected to be scheduled.
+		ExpectedScheduledIndices map[string][]int
+		// For each queue, indices of jobs expected to be preempted.
+		// E.g., ExpectedPreemptedIndices["A"][0] are the indices of jobs declared for queue A in round 0.
+		ExpectedPreemptedIndices map[string]map[int][]int
+		// For each queue, indices of jobs to unbind before scheduling, to simulate jobs terminating.
+		// E.g., IndicesToUnbind["A"][0] are the indices of jobs declared for queue A in round 0.
+		IndicesToUnbind map[string]map[int][]int
+		// Indices of nodes that should be cordoned before scheduling.
+		NodeIndicesToCordon []int
+	}
+	tests := map[string]struct {
+		SchedulingConfig configuration.SchedulingConfig
+		// Nodes to be considered by the scheduler.
+		Nodes []*schedulerobjects.Node
+		// Each item corresponds to a call to Schedule().
+		Rounds []SchedulingRound
+		// Map from queue to the priority factor associated with that queue.
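+		// The scheduler derives each queue's weight as 1/priorityFactor.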
+ PriorityFactorByQueue map[string]float64 + // Map of nodeId to jobs running on those nodes + InitialRunningJobs map[int][]*jobdb.Job + }{ + "three users, highest price jobs from single queue get on": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 32), + "B": testfixtures.N1Cpu4GiJobsWithPrice("B", 101.0, 32), + "C": testfixtures.N1Cpu4GiJobsWithPrice("C", 99.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "B": testfixtures.IntRange(0, 31), + }, + }, + { + // The system should be in steady-state; nothing should be scheduled/preempted. + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 32), + "C": testfixtures.N1Cpu4GiJobsWithPrice("C", 99.0, 32), + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1, "C": 1}, + }, + "three users, highest price jobs between queues get on": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": append( + testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 11), + testfixtures.N1Cpu4GiJobsWithPrice("A", 99.0, 21)..., + ), + "B": append( + testfixtures.N1Cpu4GiJobsWithPrice("B", 100.0, 11), + testfixtures.N1Cpu4GiJobsWithPrice("B", 99.0, 21)..., + ), + "C": append( + testfixtures.N1Cpu4GiJobsWithPrice("C", 100.0, 11), + testfixtures.N1Cpu4GiJobsWithPrice("C", 99.0, 21)..., + ), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 10), + "B": testfixtures.IntRange(0, 10), + "C": testfixtures.IntRange(0, 9), + }, + }, + { + // The system should be in steady-state; nothing should be scheduled/preempted. 
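+					// The running jobs are all priced at 100.0, so these 99.0 bids cannot displace them.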
+ JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 99.0, 21), + "B": testfixtures.N1Cpu4GiJobsWithPrice("B", 99.0, 21), + "C": testfixtures.N1Cpu4GiJobsWithPrice("C", 99.0, 21), + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1, "C": 1}, + }, + "Two users, no preemption if price lower": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 31), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "B": testfixtures.N1Cpu4GiJobsWithPrice("B", 99.0, 32), + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1}, + }, + "Two users, preemption if price higher": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 31), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "B": testfixtures.N1Cpu4GiJobsWithPrice("B", 101.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "B": testfixtures.IntRange(0, 31), + }, + ExpectedPreemptedIndices: map[string]map[int][]int{ + "A": { + 0: testfixtures.IntRange(0, 31), + }, + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1}, + }, + "Two users, partial preemption if price higher": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 31), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "B": append( + testfixtures.N1Cpu4GiJobsWithPrice("B", 99.0, 16), + testfixtures.N1Cpu4GiJobsWithPrice("B", 101.0, 16)..., + ), + }, + ExpectedScheduledIndices: map[string][]int{ + "B": testfixtures.IntRange(16, 31), + }, + ExpectedPreemptedIndices: map[string]map[int][]int{ + "A": { + 0: testfixtures.IntRange(16, 31), + }, + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1}, + }, + "Self Preemption If Price Is Higher": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 31), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": append( + testfixtures.N1Cpu4GiJobsWithPrice("A", 99.0, 16), + testfixtures.N1Cpu4GiJobsWithPrice("A", 101.0, 16)..., + ), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(16, 31), + }, + ExpectedPreemptedIndices: map[string]map[int][]int{ + "A": { + 0: testfixtures.IntRange(16, 31), + }, + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1}, + }, + "Two Users. 
Self preemption plus cross user preemption": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 31), + }, + }, + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 102.0, 16), + "B": testfixtures.N1Cpu4GiJobsWithPrice("B", 101.0, 32), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 15), + "B": testfixtures.IntRange(0, 15), + }, + ExpectedPreemptedIndices: map[string]map[int][]int{ + "A": { + 0: testfixtures.IntRange(0, 31), + }, + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1}, + }, + "gang preemption": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 100.0, 16), + "B": testfixtures.N1Cpu4GiJobsWithPrice("B", 100.0, 16), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 15), + "B": testfixtures.IntRange(0, 15), + }, + }, + { + // Schedule a gang filling the remaining space on both nodes. + JobsByQueue: map[string][]*jobdb.Job{ + "C": testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobsWithPrice("C", 101.0, 32)), + }, + ExpectedScheduledIndices: map[string][]int{ + "C": testfixtures.IntRange(0, 31), + }, + ExpectedPreemptedIndices: map[string]map[int][]int{ + "A": { + 0: testfixtures.IntRange(0, 15), + }, + "B": { + 0: testfixtures.IntRange(0, 15), + }, + }, + }, + { + // Schedule jobs that requires preempting one job in the gang, + // and assert that all jobs in the gang are preempted. + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobsWithPrice("A", 102.0, 17), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 16), + }, + ExpectedPreemptedIndices: map[string]map[int][]int{ + "C": { + 1: testfixtures.IntRange(0, 31), + }, + }, + }, + }, + PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1, "C": 1}, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + priorities := types.AllowedPriorities(tc.SchedulingConfig.PriorityClasses) + + jobDb := jobdb.NewJobDb(tc.SchedulingConfig.PriorityClasses, tc.SchedulingConfig.DefaultPriorityClassName, stringinterner.New(1024), testfixtures.TestResourceListFactory) + jobDbTxn := jobDb.WriteTxn() + + // Add all the initial jobs, creating runs for them + for nodeIdx, jobs := range tc.InitialRunningJobs { + node := tc.Nodes[nodeIdx] + for _, job := range jobs { + err := jobDbTxn.Upsert([]*jobdb.Job{ + job.WithQueued(false). + WithNewRun(node.GetExecutor(), node.GetId(), node.GetName(), node.GetPool(), job.PriorityClass().Priority), + }) + require.NoError(t, err) + } + } + + // Accounting across scheduling rounds. + roundByJobId := make(map[string]int) + indexByJobId := make(map[string]int) + allocatedByQueueAndPriorityClass := make(map[string]map[string]internaltypes.ResourceList) + nodeIdByJobId := make(map[string]string) + var jobIdsByGangId map[string]map[string]bool + var gangIdByJobId map[string]string + + // Scheduling rate-limiters persist between rounds. + // We control the rate at which time passes between scheduling rounds. 
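+			// Round i is treated as starting at schedulingStarted + i*schedulingInterval (see sctx.Started below).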
+ schedulingStarted := time.Now() + schedulingInterval := time.Second + limiter := rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumSchedulingRate), + tc.SchedulingConfig.MaximumSchedulingBurst, + ) + limiterByQueue := make(map[string]*rate.Limiter) + for queue := range tc.PriorityFactorByQueue { + limiterByQueue[queue] = rate.NewLimiter( + rate.Limit(tc.SchedulingConfig.MaximumPerQueueSchedulingRate), + tc.SchedulingConfig.MaximumPerQueueSchedulingBurst, + ) + } + + demandByQueue := map[string]internaltypes.ResourceList{} + + // Run the scheduler. + cordonedNodes := map[int]bool{} + ctx := armadacontext.Background() + for i, round := range tc.Rounds { + ctx.FieldLogger = ctx.WithField("round", i) + ctx.Infof("starting scheduling round %d", i) + + jobsByNode := map[string][]*jobdb.Job{} + for _, job := range jobDbTxn.GetAll() { + if job.LatestRun() != nil && !job.LatestRun().InTerminalState() { + node := job.LatestRun().NodeId() + jobsByNode[node] = append(jobsByNode[node], job) + } + } + + nodeDb, err := NewNodeDb(tc.SchedulingConfig, stringinterner.New(1024)) + require.NoError(t, err) + nodeDbTxn := nodeDb.Txn(true) + for _, node := range tc.Nodes { + dbNode, err := testfixtures.TestNodeFactory.FromSchedulerObjectsNode(node) + require.NoError(t, err) + err = nodeDb.CreateAndInsertWithJobDbJobsWithTxn(nodeDbTxn, jobsByNode[node.Name], dbNode) + require.NoError(t, err) + } + nodeDbTxn.Commit() + + // Enqueue jobs that should be considered in this round. + var queuedJobs []*jobdb.Job + for queue, jobs := range round.JobsByQueue { + for j, job := range jobs { + job = job.WithQueued(true) + require.Equal(t, queue, job.Queue()) + queuedJobs = append(queuedJobs, job.WithQueued(true)) + roundByJobId[job.Id()] = i + indexByJobId[job.Id()] = j + demandByQueue[job.Queue()] = demandByQueue[job.Queue()].Add(job.AllResourceRequirements()) + } + } + err = jobDbTxn.Upsert(queuedJobs) + require.NoError(t, err) + + // Unbind jobs from nodes, to simulate those jobs terminating between rounds. + for queue, reqIndicesByRoundIndex := range round.IndicesToUnbind { + for roundIndex, reqIndices := range reqIndicesByRoundIndex { + for _, reqIndex := range reqIndices { + job := tc.Rounds[roundIndex].JobsByQueue[queue][reqIndex] + nodeId := nodeIdByJobId[job.Id()] + node, err := nodeDb.GetNode(nodeId) + require.NoError(t, err) + node, err = nodeDb.UnbindJobFromNode(job, node) + require.NoError(t, err) + err = nodeDb.Upsert(node) + require.NoError(t, err) + if gangId, ok := gangIdByJobId[job.Id()]; ok { + delete(gangIdByJobId, job.Id()) + delete(jobIdsByGangId[gangId], job.Id()) + } + demandByQueue[job.Queue()] = demandByQueue[job.Queue()].Subtract(job.AllResourceRequirements()) + } + } + } + + // Cordon nodes. + for _, idx := range round.NodeIndicesToCordon { + cordonedNodes[idx] = true + } + for idx, isCordoned := range cordonedNodes { + if isCordoned { + node, err := nodeDb.GetNode(tc.Nodes[idx].Id) + require.NoError(t, err) + ctx.Infof("Cordoned node %s", node.GetId()) + taints := append(slices.Clone(node.GetTaints()), internaltypes.UnschedulableTaint()) + node = testNodeWithTaints(node, taints) + err = nodeDb.Upsert(node) + require.NoError(t, err) + } + } + + // If not provided, set total resources equal to the aggregate over tc.Nodes. 
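+			// (This test struct has no TotalResources field, so the totals always come from the nodeDb.)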
+ totalResources := nodeDb.TotalKubernetesResources() + + fairnessCostProvider, err := fairness.NewDominantResourceFairness( + nodeDb.TotalKubernetesResources(), + tc.SchedulingConfig, + ) + require.NoError(t, err) + sctx := context.NewSchedulingContext( + testfixtures.TestPool, + fairnessCostProvider, + limiter, + totalResources, + ) + sctx.Started = schedulingStarted.Add(time.Duration(i) * schedulingInterval) + + for queue, priorityFactor := range tc.PriorityFactorByQueue { + weight := 1 / priorityFactor + queueDemand := demandByQueue[queue] + err := sctx.AddQueueSchedulingContext( + queue, + weight, + allocatedByQueueAndPriorityClass[queue], + queueDemand, + queueDemand, + limiterByQueue[queue], + ) + require.NoError(t, err) + } + constraints := schedulerconstraints.NewSchedulingConstraints( + "pool", + totalResources, + tc.SchedulingConfig, + armadaslices.Map( + maps.Keys(tc.PriorityFactorByQueue), + func(qn string) *api.Queue { return &api.Queue{Name: qn} }, + )) + sctx.UpdateFairShares() + sch := NewPreemptingQueueScheduler( + sctx, + constraints, + testfixtures.TestEmptyFloatingResources, + true, + tc.SchedulingConfig.ProtectedFractionOfFairShare, + tc.SchedulingConfig.MaxQueueLookback, + jobDbTxn, + nodeDb, + nodeIdByJobId, + jobIdsByGangId, + gangIdByJobId, + true, + ) + + result, err := sch.Schedule(ctx) + require.NoError(t, err) + jobIdsByGangId = sch.jobIdsByGangId + gangIdByJobId = sch.gangIdByJobId + + // Test resource accounting. + for _, jctx := range result.PreemptedJobs { + job := jctx.Job + m := allocatedByQueueAndPriorityClass[job.Queue()] + if m == nil { + m = make(map[string]internaltypes.ResourceList) + allocatedByQueueAndPriorityClass[job.Queue()] = m + } + m[job.PriorityClassName()] = m[job.PriorityClassName()].Subtract(job.AllResourceRequirements()) + } + for _, jctx := range result.ScheduledJobs { + job := jctx.Job + m := allocatedByQueueAndPriorityClass[job.Queue()] + if m == nil { + m = make(map[string]internaltypes.ResourceList) + allocatedByQueueAndPriorityClass[job.Queue()] = m + } + m[job.PriorityClassName()] = m[job.PriorityClassName()].Add(job.AllResourceRequirements()) + } + for queue, qctx := range sctx.QueueSchedulingContexts { + m := allocatedByQueueAndPriorityClass[queue] + assert.Equal(t, internaltypes.RlMapRemoveZeros(m), internaltypes.RlMapRemoveZeros(qctx.AllocatedByPriorityClass)) + } + + // Test that jobs are mapped to nodes correctly. + for _, jctx := range result.PreemptedJobs { + job := jctx.Job + nodeId, ok := result.NodeIdByJobId[job.Id()] + assert.True(t, ok) + assert.NotEmpty(t, nodeId) + + // Check that preempted jobs are preempted from the node they were previously scheduled onto. + expectedNodeId := nodeIdByJobId[job.Id()] + assert.Equal(t, expectedNodeId, nodeId, "job %s preempted from unexpected node", job.Id()) + } + for _, jctx := range result.ScheduledJobs { + job := jctx.Job + nodeId, ok := result.NodeIdByJobId[job.Id()] + assert.True(t, ok) + assert.NotEmpty(t, nodeId) + + node, err := nodeDb.GetNode(nodeId) + require.NoError(t, err) + assert.NotEmpty(t, node) + + // Check that the job can actually go onto this node. + matches, reason, err := nodedb.StaticJobRequirementsMet(node, jctx) + require.NoError(t, err) + assert.Empty(t, reason) + assert.True(t, matches) + + // Check that scheduled jobs are consistently assigned to the same node. + // (We don't allow moving jobs between nodes.) 
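+				// A job scheduled in an earlier round must reappear on the node recorded for it then.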
+ if expectedNodeId, ok := nodeIdByJobId[job.Id()]; ok { + assert.Equal(t, expectedNodeId, nodeId, "job %s scheduled onto unexpected node", job.Id()) + } else { + nodeIdByJobId[job.Id()] = nodeId + } + } + for jobId, nodeId := range result.NodeIdByJobId { + if expectedNodeId, ok := nodeIdByJobId[jobId]; ok { + assert.Equal(t, expectedNodeId, nodeId, "job %s preempted from/scheduled onto unexpected node", jobId) + } + } + + // Expected scheduled jobs. + jobIdsByQueue := jobIdsByQueueFromJobContexts(result.ScheduledJobs) + scheduledQueues := armadamaps.MapValues(round.ExpectedScheduledIndices, func(v []int) bool { return true }) + maps.Copy(scheduledQueues, armadamaps.MapValues(jobIdsByQueue, func(v []string) bool { return true })) + for queue := range scheduledQueues { + expected := round.ExpectedScheduledIndices[queue] + jobIds := jobIdsByQueue[queue] + actual := make([]int, 0) + for _, jobId := range jobIds { + actual = append(actual, indexByJobId[jobId]) + } + slices.Sort(actual) + slices.Sort(expected) + assert.Equal(t, expected, actual, "scheduling from queue %s", queue) + } + + // Expected preempted jobs. + jobIdsByQueue = jobIdsByQueueFromJobContexts(result.PreemptedJobs) + preemptedQueues := armadamaps.MapValues(round.ExpectedPreemptedIndices, func(v map[int][]int) bool { return true }) + maps.Copy(preemptedQueues, armadamaps.MapValues(jobIdsByQueue, func(v []string) bool { return true })) + for queue := range preemptedQueues { + expected := round.ExpectedPreemptedIndices[queue] + jobIds := jobIdsByQueue[queue] + actual := make(map[int][]int) + for _, jobId := range jobIds { + i := roundByJobId[jobId] + j := indexByJobId[jobId] + actual[i] = append(actual[i], j) + } + for _, s := range expected { + slices.Sort(s) + } + for _, s := range actual { + slices.Sort(s) + } + assert.Equal(t, expected, actual, "preempting from queue %s", queue) + } + + // We expect there to be no oversubscribed nodes. + it, err := nodedb.NewNodesIterator(nodeDb.Txn(false)) + require.NoError(t, err) + for node := it.NextNode(); node != nil; node = it.NextNode() { + for _, p := range priorities { + for _, r := range node.AllocatableByPriority[p].GetResources() { + assert.True(t, r.RawValue >= 0, "resource %s oversubscribed by %d on node %s", r.Name, r.RawValue, node.GetId()) + } + } + } + + err = jobDbTxn.BatchDelete(armadaslices.Map(queuedJobs, func(job *jobdb.Job) string { return job.Id() })) + require.NoError(t, err) + + var preemptedJobs []*jobdb.Job + for _, jctx := range result.PreemptedJobs { + job := jctx.Job + preemptedJobs = append( + preemptedJobs, + job. + WithUpdatedRun(job.LatestRun().WithFailed(true)). + WithQueued(false). + WithFailed(true), + ) + } + err = jobDbTxn.Upsert(preemptedJobs) + require.NoError(t, err) + + // Jobs may arrive out of order here; sort them, so that runs + // are created in the right order (this influences the order in + // which jobs are preempted). 
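+			// Sort scheduled jobs by submit time so runs are created oldest-first.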
+ slices.SortFunc( + result.ScheduledJobs, + func(a, b *context.JobSchedulingContext) int { + if a.Job.SubmitTime().Before(b.Job.SubmitTime()) { + return -1 + } else if b.Job.SubmitTime().Before(a.Job.SubmitTime()) { + return 1 + } else { + return 0 + } + }, + ) + var scheduledJobs []*jobdb.Job + for _, jctx := range result.ScheduledJobs { + job := jctx.Job + jobId := job.Id() + node, err := nodeDb.GetNode(result.NodeIdByJobId[jobId]) + require.NotNil(t, node) + require.NoError(t, err) + priority, ok := nodeDb.GetScheduledAtPriority(jobId) + require.True(t, ok) + scheduledJobs = append( + scheduledJobs, + job.WithQueuedVersion(job.QueuedVersion()+1). + WithQueued(false). + WithNewRun(node.GetExecutor(), node.GetId(), node.GetName(), node.GetPool(), priority), + ) + } + err = jobDbTxn.Upsert(scheduledJobs) + require.NoError(t, err) + } + }) + } +} diff --git a/internal/scheduler/scheduling/preempting_queue_scheduler.go b/internal/scheduler/scheduling/preempting_queue_scheduler.go index c3056ffd384..47207a5b8e0 100644 --- a/internal/scheduler/scheduling/preempting_queue_scheduler.go +++ b/internal/scheduler/scheduling/preempting_queue_scheduler.go @@ -41,6 +41,7 @@ type PreemptingQueueScheduler struct { jobIdsByGangId map[string]map[string]bool // Maps job ids of gang jobs to the id of that gang. gangIdByJobId map[string]string + marketDriven bool } func NewPreemptingQueueScheduler( @@ -55,6 +56,7 @@ func NewPreemptingQueueScheduler( initialNodeIdByJobId map[string]string, initialJobIdsByGangId map[string]map[string]bool, initialGangIdByJobId map[string]string, + marketDriven bool, ) *PreemptingQueueScheduler { if initialNodeIdByJobId == nil { initialNodeIdByJobId = make(map[string]string) @@ -81,6 +83,7 @@ func NewPreemptingQueueScheduler( nodeIdByJobId: maps.Clone(initialNodeIdByJobId), jobIdsByGangId: initialJobIdsByGangId, gangIdByJobId: maps.Clone(initialGangIdByJobId), + marketDriven: marketDriven, } } @@ -122,6 +125,10 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche ctx.Errorf("can't evict job %s: nodeSelector not initialised", job.Id()) return false } + // If we are in market mode then everything is evictable + if sch.marketDriven { + return true + } if qctx, ok := sch.schedulingContext.QueueSchedulingContexts[job.Queue()]; ok { actualShare := sch.schedulingContext.FairnessCostProvider.UnweightedCostFromQueue(qctx) fairShare := math.Max(qctx.AdjustedFairShare, qctx.FairShare) @@ -257,7 +264,7 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche func (sch *PreemptingQueueScheduler) evict(ctx *armadacontext.Context, evictor *Evictor) (*EvictorResult, *InMemoryJobRepository, error) { if evictor == nil { - return &EvictorResult{}, NewInMemoryJobRepository(sch.schedulingContext.Pool), nil + return &EvictorResult{}, NewInMemoryJobRepository(sch.schedulingContext.Pool, jobdb.MarketSchedulingOrderCompare), nil } txn := sch.nodeDb.Txn(true) defer txn.Abort() @@ -301,7 +308,12 @@ func (sch *PreemptingQueueScheduler) evict(ctx *armadacontext.Context, evictor * if err := sch.evictionAssertions(result); err != nil { return nil, nil, err } - inMemoryJobRepo := NewInMemoryJobRepository(sch.schedulingContext.Pool) + + schedulingOrder := jobdb.SchedulingOrderCompare + if sch.marketDriven { + schedulingOrder = jobdb.MarketSchedulingOrderCompare + } + inMemoryJobRepo := NewInMemoryJobRepository(sch.schedulingContext.Pool, schedulingOrder) inMemoryJobRepo.EnqueueMany(evictedJctxs) txn.Commit() @@ -481,6 +493,7 @@ func (q MinimalQueue) 
GetWeight() float64 { // addEvictedJobsToNodeDb adds evicted jobs to the NodeDb. // Needed to enable the nodeDb accounting for these when preempting. func (sch *PreemptingQueueScheduler) addEvictedJobsToNodeDb(_ *armadacontext.Context, inMemoryJobRepo *InMemoryJobRepository) error { + sctx := sch.schedulingContext gangItByQueue := make(map[string]*QueuedGangIterator) for _, qctx := range sch.schedulingContext.QueueSchedulingContexts { gangItByQueue[qctx.Queue] = NewQueuedGangIterator( @@ -491,9 +504,18 @@ func (sch *PreemptingQueueScheduler) addEvictedJobsToNodeDb(_ *armadacontext.Con ) } qr := NewMinimalQueueRepositoryFromSchedulingContext(sch.schedulingContext) - candidateGangIterator, err := NewCandidateGangIterator(sch.schedulingContext.Pool, qr, sch.schedulingContext.FairnessCostProvider, gangItByQueue, false, sch.preferLargeJobOrdering) - if err != nil { - return err + var candidateGangIterator CandidateGangIterator + var err error + if sch.marketDriven { + candidateGangIterator, err = NewMarketCandidateGangIterator(sctx.Pool, sctx, gangItByQueue) + if err != nil { + return err + } + } else { + candidateGangIterator, err = NewCostBasedCandidateGangIterator(sctx.Pool, sctx, sctx.FairnessCostProvider, gangItByQueue, false, sch.preferLargeJobOrdering) + if err != nil { + return err + } } txn := sch.nodeDb.Txn(true) defer txn.Abort() @@ -528,14 +550,22 @@ func (sch *PreemptingQueueScheduler) schedule( skipUnsuccessfulSchedulingKeyCheck bool, considerPriorityCLassPriority bool, ) (*SchedulerResult, error) { + sortOrder := jobdb.FairShareOrder + if sch.marketDriven { + sortOrder = jobdb.PriceOrder + } jobIteratorByQueue := make(map[string]JobContextIterator) for _, qctx := range sch.schedulingContext.QueueSchedulingContexts { evictedIt := inMemoryJobRepo.GetJobIterator(qctx.Queue) if jobRepo == nil || reflect.ValueOf(jobRepo).IsNil() { jobIteratorByQueue[qctx.Queue] = evictedIt } else { - queueIt := NewQueuedJobsIterator(ctx, qctx.Queue, sch.schedulingContext.Pool, jobRepo) - jobIteratorByQueue[qctx.Queue] = NewMultiJobsIterator(evictedIt, queueIt) + queueIt := NewQueuedJobsIterator(ctx, qctx.Queue, sch.schedulingContext.Pool, jobRepo, sortOrder) + if sch.marketDriven { + jobIteratorByQueue[qctx.Queue] = NewMarketDrivenMultiJobsIterator(evictedIt, queueIt) + } else { + jobIteratorByQueue[qctx.Queue] = NewMultiJobsIterator(evictedIt, queueIt) + } } } @@ -552,6 +582,7 @@ func (sch *PreemptingQueueScheduler) schedule( considerPriorityCLassPriority, sch.preferLargeJobOrdering, sch.maxQueueLookBack, + sch.marketDriven, ) if err != nil { return nil, err diff --git a/internal/scheduler/scheduling/preempting_queue_scheduler_test.go b/internal/scheduler/scheduling/preempting_queue_scheduler_test.go index 86e1a688951..76e6dc492fb 100644 --- a/internal/scheduler/scheduling/preempting_queue_scheduler_test.go +++ b/internal/scheduler/scheduling/preempting_queue_scheduler_test.go @@ -2071,6 +2071,7 @@ func TestPreemptingQueueScheduler(t *testing.T) { nodeIdByJobId, jobIdsByGangId, gangIdByJobId, + false, ) result, err := sch.Schedule(ctx) @@ -2424,6 +2425,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { nil, nil, nil, + false, ) result, err := sch.Schedule(ctx) require.NoError(b, err) @@ -2487,6 +2489,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { nil, nil, nil, + false, ) result, err := sch.Schedule(ctx) require.NoError(b, err) diff --git a/internal/scheduler/scheduling/preemption_description_test.go b/internal/scheduler/scheduling/preemption_description_test.go index 
952f3d75441..22482677e78 100644 --- a/internal/scheduler/scheduling/preemption_description_test.go +++ b/internal/scheduler/scheduling/preemption_description_test.go @@ -136,7 +136,7 @@ func makeJob(t *testing.T, jobId string, isGang bool) *jobdb.Job { }, } - job, err := testfixtures.JobDb.NewJob(jobId, "jobset", "queue", 1, schedulingInfo, false, 1, false, false, false, 0, true, []string{}) + job, err := testfixtures.JobDb.NewJob(jobId, "jobset", "queue", 1, 0.0, schedulingInfo, false, 1, false, false, false, 0, true, []string{}) require.NoError(t, err) return job } diff --git a/internal/scheduler/scheduling/queue_scheduler.go b/internal/scheduler/scheduling/queue_scheduler.go index ebdbf6071be..27a716414d3 100644 --- a/internal/scheduler/scheduling/queue_scheduler.go +++ b/internal/scheduler/scheduling/queue_scheduler.go @@ -20,11 +20,19 @@ import ( "github.com/armadaproject/armada/internal/scheduler/scheduling/fairness" ) +type CandidateGangIterator interface { + Peek() (*schedulercontext.GangSchedulingContext, float64, error) + Clear() error + GetAllocationForQueue(queue string) (internaltypes.ResourceList, bool) + OnlyYieldEvicted() + OnlyYieldEvictedForQueue(queue string) +} + // QueueScheduler is responsible for choosing the order in which to attempt scheduling queued gangs. // Relies on GangScheduler for scheduling once a gang is chosen. type QueueScheduler struct { schedulingContext *schedulercontext.SchedulingContext - candidateGangIterator *CandidateGangIterator + candidateGangIterator CandidateGangIterator gangScheduler *GangScheduler } @@ -38,6 +46,7 @@ func NewQueueScheduler( considerPriorityClassPriority bool, prioritiseLargerJobs bool, maxQueueLookBack uint, + marketDriven bool, ) (*QueueScheduler, error) { for queue := range jobIteratorByQueue { if _, ok := sctx.QueueSchedulingContexts[queue]; !ok { @@ -52,10 +61,19 @@ func NewQueueScheduler( for queue, it := range jobIteratorByQueue { gangIteratorsByQueue[queue] = NewQueuedGangIterator(sctx, it, maxQueueLookBack, true) } - candidateGangIterator, err := NewCandidateGangIterator(sctx.Pool, sctx, sctx.FairnessCostProvider, gangIteratorsByQueue, considerPriorityClassPriority, prioritiseLargerJobs) - if err != nil { - return nil, err + var candidateGangIterator CandidateGangIterator + if marketDriven { + candidateGangIterator, err = NewMarketCandidateGangIterator(sctx.Pool, sctx, gangIteratorsByQueue) + if err != nil { + return nil, err + } + } else { + candidateGangIterator, err = NewCostBasedCandidateGangIterator(sctx.Pool, sctx, sctx.FairnessCostProvider, gangIteratorsByQueue, considerPriorityClassPriority, prioritiseLargerJobs) + if err != nil { + return nil, err + } } + return &QueueScheduler{ schedulingContext: sctx, candidateGangIterator: candidateGangIterator, @@ -96,7 +114,6 @@ func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResul return nil, err default: } - start := time.Now() scheduledOk, unschedulableReason, err := sch.gangScheduler.Schedule(ctx, gctx) if err != nil { @@ -142,10 +159,10 @@ func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResul stats.LastGangScheduledSampleJobId = gctx.JobIds()[0] stats.LastGangScheduledQueueCost = queueCostInclGang stats.LastGangScheduledQueuePosition = loopNumber - queue, queueOK := sch.candidateGangIterator.queueRepository.GetQueue(gctx.Queue) - if queueOK { + allocation, ok := sch.candidateGangIterator.GetAllocationForQueue(gctx.Queue) + if ok { stats.LastGangScheduledResources = gctx.TotalResourceRequests - 
stats.LastGangScheduledQueueResources = queue.GetAllocation() + stats.LastGangScheduledQueueResources = allocation } else { stats.LastGangScheduledResources = internaltypes.ResourceList{} stats.LastGangScheduledQueueResources = internaltypes.ResourceList{} @@ -321,10 +338,10 @@ func (it *QueuedGangIterator) hitLookbackLimit() bool { return it.jobsSeen > it.maxLookback } -// CandidateGangIterator determines which gang to try scheduling next across queues. +// CostBasedCandidateGangIterator determines which gang to try scheduling next across queues. // Specifically, it yields the next gang in the queue with smallest fraction of its fair share, // where the fraction of fair share computation includes the yielded gang. -type CandidateGangIterator struct { +type CostBasedCandidateGangIterator struct { pool string queueRepository fairness.QueueRepository fairnessCostProvider fairness.FairnessCostProvider @@ -338,15 +355,23 @@ type CandidateGangIterator struct { pq QueueCandidateGangIteratorPQ } -func NewCandidateGangIterator( +func (it *CostBasedCandidateGangIterator) GetAllocationForQueue(queue string) (internaltypes.ResourceList, bool) { + q, ok := it.queueRepository.GetQueue(queue) + if !ok { + return internaltypes.ResourceList{}, false + } + return q.GetAllocation(), true +} + +func NewCostBasedCandidateGangIterator( pool string, queueRepository fairness.QueueRepository, fairnessCostProvider fairness.FairnessCostProvider, iteratorsByQueue map[string]*QueuedGangIterator, considerPriority bool, prioritiseLargerJobs bool, -) (*CandidateGangIterator, error) { - it := &CandidateGangIterator{ +) (*CostBasedCandidateGangIterator, error) { + it := &CostBasedCandidateGangIterator{ pool: pool, queueRepository: queueRepository, fairnessCostProvider: fairnessCostProvider, @@ -366,17 +391,17 @@ func NewCandidateGangIterator( return it, nil } -func (it *CandidateGangIterator) OnlyYieldEvicted() { +func (it *CostBasedCandidateGangIterator) OnlyYieldEvicted() { it.onlyYieldEvicted = true } -func (it *CandidateGangIterator) OnlyYieldEvictedForQueue(queue string) { +func (it *CostBasedCandidateGangIterator) OnlyYieldEvictedForQueue(queue string) { it.onlyYieldEvictedByQueue[queue] = true } // Clear removes the first item in the iterator. // If it.onlyYieldEvicted is true, any consecutive non-evicted jobs are also removed. -func (it *CandidateGangIterator) Clear() error { +func (it *CostBasedCandidateGangIterator) Clear() error { if it.pq.Len() == 0 { return nil } @@ -404,7 +429,7 @@ func (it *CandidateGangIterator) Clear() error { return nil } -func (it *CandidateGangIterator) Peek() (*schedulercontext.GangSchedulingContext, float64, error) { +func (it *CostBasedCandidateGangIterator) Peek() (*schedulercontext.GangSchedulingContext, float64, error) { if it.pq.Len() == 0 { // No queued jobs left. 
return nil, 0.0, nil @@ -413,7 +438,7 @@ func (it *CandidateGangIterator) Peek() (*schedulercontext.GangSchedulingContext return first.gctx, first.proposedQueueCost, nil } -func (it *CandidateGangIterator) newPQItem(queue string, queueFairShare float64, queueIt *QueuedGangIterator) *QueueCandidateGangIteratorItem { +func (it *CostBasedCandidateGangIterator) newPQItem(queue string, queueFairShare float64, queueIt *QueuedGangIterator) *QueueCandidateGangIteratorItem { return &QueueCandidateGangIteratorItem{ queue: queue, fairShare: queueFairShare, @@ -421,7 +446,7 @@ func (it *CandidateGangIterator) newPQItem(queue string, queueFairShare float64, } } -func (it *CandidateGangIterator) updateAndPushPQItem(item *QueueCandidateGangIteratorItem) (bool, error) { +func (it *CostBasedCandidateGangIterator) updateAndPushPQItem(item *QueueCandidateGangIteratorItem) (bool, error) { if err := it.updatePQItem(item); err != nil { return false, err } @@ -438,7 +463,7 @@ func (it *CandidateGangIterator) updateAndPushPQItem(item *QueueCandidateGangIte return true, nil } -func (it *CandidateGangIterator) updatePQItem(item *QueueCandidateGangIteratorItem) error { +func (it *CostBasedCandidateGangIterator) updatePQItem(item *QueueCandidateGangIteratorItem) error { item.gctx = nil item.proposedQueueCost = 0 item.currentQueueCost = 0 @@ -485,7 +510,7 @@ func (it *CandidateGangIterator) updatePQItem(item *QueueCandidateGangIteratorIt } // returns the queue of the supplied gctx -func (it *CandidateGangIterator) getQueue(gctx *schedulercontext.GangSchedulingContext) (fairness.Queue, error) { +func (it *CostBasedCandidateGangIterator) getQueue(gctx *schedulercontext.GangSchedulingContext) (fairness.Queue, error) { gangQueue := gctx.Queue if len(gctx.JobSchedulingContexts) > 0 && !gctx.JobSchedulingContexts[0].IsHomeJob(it.pool) { gangQueue = schedulercontext.CalculateAwayQueueName(gctx.Queue) diff --git a/internal/scheduler/scheduling/queue_scheduler_test.go b/internal/scheduler/scheduling/queue_scheduler_test.go index 2f022931b08..fdb6f47cc2c 100644 --- a/internal/scheduler/scheduling/queue_scheduler_test.go +++ b/internal/scheduler/scheduling/queue_scheduler_test.go @@ -503,7 +503,7 @@ func TestQueueScheduler(t *testing.T) { } indexByJobId[job.Id()] = i } - jobRepo := NewInMemoryJobRepository(testfixtures.TestPool) + jobRepo := NewInMemoryJobRepository(testfixtures.TestPool, jobdb.SchedulingOrderCompare) jobRepo.EnqueueMany( context.JobSchedulingContextsFromJobs(tc.Jobs), ) @@ -546,7 +546,7 @@ func TestQueueScheduler(t *testing.T) { it := jobRepo.GetJobIterator(q.Name) jobIteratorByQueue[q.Name] = it } - sch, err := NewQueueScheduler(sctx, constraints, testfixtures.TestEmptyFloatingResources, nodeDb, jobIteratorByQueue, false, false, true, tc.SchedulingConfig.MaxQueueLookback) + sch, err := NewQueueScheduler(sctx, constraints, testfixtures.TestEmptyFloatingResources, nodeDb, jobIteratorByQueue, false, false, true, tc.SchedulingConfig.MaxQueueLookback, false) require.NoError(t, err) result, err := sch.Schedule(armadacontext.Background()) diff --git a/internal/scheduler/scheduling/scheduling_algo.go b/internal/scheduler/scheduling/scheduling_algo.go index 6840a87782a..c14121fadf9 100644 --- a/internal/scheduler/scheduling/scheduling_algo.go +++ b/internal/scheduler/scheduling/scheduling_algo.go @@ -2,6 +2,7 @@ package scheduling import ( "context" + "math" "time" "github.com/pkg/errors" @@ -127,7 +128,7 @@ func (l *FairSchedulingAlgo) Schedule( } start := time.Now() - schedulerResult, sctx, err := l.SchedulePool(ctx, 
fsctx, pool.Name)
+	schedulerResult, sctx, err := l.SchedulePool(ctx, fsctx, pool.Name, pool.MarketDriven)
 
 	ctx.Infof("Scheduled on executor pool %s in %v with error %v", pool, time.Now().Sub(start), err)
@@ -510,6 +511,7 @@ func (l *FairSchedulingAlgo) SchedulePool(
 	ctx *armadacontext.Context,
 	fsctx *FairSchedulingAlgoContext,
 	pool string,
+	marketDriven bool,
 ) (*SchedulerResult, *schedulercontext.SchedulingContext, error) {
 	totalResources := fsctx.nodeDb.TotalKubernetesResources()
 	totalResources = totalResources.Add(l.floatingResourceTypes.GetTotalAvailableForPool(pool))
@@ -529,6 +531,7 @@ func (l *FairSchedulingAlgo) SchedulePool(
 		fsctx.nodeIdByJobId,
 		fsctx.jobIdsByGangId,
 		fsctx.gangIdByJobId,
+		marketDriven,
 	)
 
 	ctx.Infof("Scheduling on pool %s with capacity %s protectedFractionOfFairShare %f",
@@ -571,6 +574,13 @@ func (l *FairSchedulingAlgo) SchedulePool(
 			WithQueued(false).
 			WithNewRun(node.GetExecutor(), node.GetId(), node.GetName(), pool, priority)
 	}
+
+	if marketDriven {
+		fractionAllocated := fsctx.schedulingContext.FairnessCostProvider.UnweightedCostFromAllocation(fsctx.schedulingContext.Allocated)
+		price := l.calculateSpotPrice(maps.Keys(fsctx.nodeIdByJobId), result.ScheduledJobs, result.PreemptedJobs, fractionAllocated, fsctx.Txn)
+		fsctx.schedulingContext.SpotPrice = price
+	}
+
 	return result, fsctx.schedulingContext, nil
 }
 
@@ -699,3 +709,39 @@ func (l *FairSchedulingAlgo) filterLaggingExecutors(
 	}
 	return activeExecutors
 }
+
+func (l *FairSchedulingAlgo) calculateSpotPrice(initialRunningJobIds []string, scheduledJobs, preemptedJobs []*schedulercontext.JobSchedulingContext, fractionAllocated float64, txn *jobdb.Txn) float64 {
+	// If we've allocated less than 95% of available resources then we don't charge.
+	// TODO: make this configurable
+	if fractionAllocated < 0.95 {
+		return 0.0
+	}
+
+	allRunningJobIds := make(map[string]bool, len(initialRunningJobIds))
+	for _, jobId := range initialRunningJobIds {
+		allRunningJobIds[jobId] = true
+	}
+
+	for _, scheduledJob := range scheduledJobs {
+		allRunningJobIds[scheduledJob.JobId] = true
+	}
+
+	for _, preemptedJob := range preemptedJobs {
+		delete(allRunningJobIds, preemptedJob.JobId)
+	}
+
+	// Find the minimum bid price among running jobs
+	minPrice := math.MaxFloat64
+	for jobId := range allRunningJobIds {
+		job := txn.GetById(jobId)
+		if job != nil && job.BidPrice() < minPrice {
+			minPrice = job.BidPrice()
+		}
+	}
+
+	// Return the lowest bid price, or 0 if no valid price was found
+	if minPrice == math.MaxFloat64 {
+		return 0.0
+	}
+	return minPrice
+}
diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go
index e7edd7605eb..a2059fa317b 100644
--- a/internal/scheduler/simulator/simulator.go
+++ b/internal/scheduler/simulator/simulator.go
@@ -604,6 +604,7 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error {
 		maps.Clone(s.accounting.nodeIdByJobId),
 		maps.Clone(s.accounting.jobIdsByGangId),
 		maps.Clone(s.accounting.gangIdByJobId),
+		false,
 	)
 
 	schedulerCtx := ctx
@@ -793,6 +794,7 @@ func (s *Simulator) handleSubmitJob(txn *jobdb.Txn, e *armadaevents.SubmitJob, t
 		eventSequence.JobSetName,
 		eventSequence.Queue,
 		e.Priority,
+		0.0,
 		schedulingInfo,
 		true,
 		0,
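
The calculateSpotPrice function above amounts to a simple uniform clearing price: nothing is charged while less than 95% of the pool is allocated, and once the pool is contended every running job pays the lowest bid that survived scheduling (preempted jobs no longer count). Below is a minimal standalone sketch of the same rule on plain data; the spotPrice function and its inputs are illustrative stand-ins, since the real version derives the running set from scheduler results and reads bids from the jobdb transaction.

package main

import (
	"fmt"
	"math"
)

// spotPrice applies the pricing rule sketched above: below the 95% utilisation
// threshold the pool is free; at or above it, the clearing price is the
// minimum bid among the jobs still running after this scheduling round.
func spotPrice(runningBids map[string]float64, fractionAllocated float64) float64 {
	if fractionAllocated < 0.95 {
		return 0.0
	}
	minBid := math.MaxFloat64
	for _, bid := range runningBids {
		if bid < minBid {
			minBid = bid
		}
	}
	if minBid == math.MaxFloat64 { // no running jobs, so nothing sets a price
		return 0.0
	}
	return minBid
}

func main() {
	bids := map[string]float64{"job-a": 3.0, "job-b": 1.5, "job-c": 2.0}
	fmt.Println(spotPrice(bids, 0.90)) // 0: pool not contended, no charge
	fmt.Println(spotPrice(bids, 0.97)) // 1.5: lowest surviving bid sets the price
}
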
diff --git a/internal/scheduler/testfixtures/testfixtures.go b/internal/scheduler/testfixtures/testfixtures.go
index 19d625dc179..4a44cf346a0 100644
--- a/internal/scheduler/testfixtures/testfixtures.go
+++ b/internal/scheduler/testfixtures/testfixtures.go
@@ -144,6 +144,7 @@ func NewJob(
 	jobSet string,
 	queue string,
 	priority uint32,
+	price float64,
 	schedulingInfo *schedulerobjects.JobSchedulingInfo,
 	queued bool,
 	queuedVersion int32,
@@ -157,6 +158,7 @@ func NewJob(
 		jobSet,
 		queue,
 		priority,
+		price,
 		schedulingInfo,
 		queued,
 		queuedVersion,
@@ -418,6 +420,16 @@ func WithAnnotationsJobs(annotations map[string]string, jobs []*jobdb.Job) []*jo
 	return jobs
 }
 
+func N1Cpu4GiJobsWithPrice(queue string, bidPrice float64, n int) []*jobdb.Job {
+	rv := make([]*jobdb.Job, n)
+	for i := 0; i < n; i++ {
+		j := Test1Cpu4GiJob(queue, PriorityClass0)
+		j = j.WithBidPrice(bidPrice)
+		rv[i] = j
+	}
+	return rv
+}
+
 func N1Cpu4GiJobs(queue string, priorityClassName string, n int) []*jobdb.Job {
 	rv := make([]*jobdb.Job, n)
 	for i := 0; i < n; i++ {
@@ -475,6 +487,7 @@ func TestJob(queue string, jobId ulid.ULID, priorityClassName string, req *sched
 		queue,
 		// This is the per-queue priority of this job, which is unrelated to `priorityClassName`.
 		1000,
+		0.0,
 		&schedulerobjects.JobSchedulingInfo{
 			PriorityClassName: priorityClassName,
 			SubmitTime:        submitTime,
@@ -869,6 +882,7 @@ func TestQueuedJobDbJob() *jobdb.Job {
 		TestJobset,
 		TestQueue,
 		0,
+		0.0,
 		&schedulerobjects.JobSchedulingInfo{
 			PriorityClassName: TestDefaultPriorityClass,
 			SubmitTime:        BaseTime,
diff --git a/internal/scheduleringester/instructions.go b/internal/scheduleringester/instructions.go
index 86c2c3bea6d..2c971a95df8 100644
--- a/internal/scheduleringester/instructions.go
+++ b/internal/scheduleringester/instructions.go
@@ -164,6 +164,11 @@ func (c *JobSetEventsInstructionConverter) handleSubmitJob(job *armadaevents.Sub
 		return nil, err
 	}
 
+	bidPrice := 0.0
+	if job.ExperimentalPriceInfo != nil {
+		bidPrice = job.ExperimentalPriceInfo.BidPrice
+	}
+
 	return []DbOperation{InsertJobs{jobId: &schedulerdb.Job{
 		JobID:   jobId,
 		JobSet:  meta.jobset,
@@ -174,6 +179,7 @@ func (c *JobSetEventsInstructionConverter) handleSubmitJob(job *armadaevents.Sub
 		QueuedVersion:         0,
 		Submitted:             submitTime.UnixNano(),
 		Priority:              int64(job.Priority),
+		BidPrice:              bidPrice,
 		SubmitMessage:         compressedSubmitJobBytes,
 		SchedulingInfo:        schedulingInfoBytes,
 		SchedulingInfoVersion: int32(schedulingInfo.Version),
diff --git a/internal/server/submit/conversion/conversions.go b/internal/server/submit/conversion/conversions.go
index 7e6fe3b3277..8368fb3c65c 100644
--- a/internal/server/submit/conversion/conversions.go
+++ b/internal/server/submit/conversion/conversions.go
@@ -47,6 +47,13 @@ func SubmitJobFromApiRequest(
 		Objects:   ingressesAndServices,
 		Scheduler: jobReq.Scheduler,
 	}
+
+	if jobReq.ExperimentalPriceInfo != nil {
+		msg.ExperimentalPriceInfo = &armadaevents.ExperimentalPriceInfo{
+			BidPrice: jobReq.ExperimentalPriceInfo.BidPrice,
+		}
+	}
+
 	postProcess(msg, config)
 	return msg
 }
diff --git a/internal/server/submit/validation/submit_request.go b/internal/server/submit/validation/submit_request.go
index 7ae87082e84..b9cb762814b 100644
--- a/internal/server/submit/validation/submit_request.go
+++ b/internal/server/submit/validation/submit_request.go
@@ -37,6 +37,7 @@ var (
 		validatePorts,
 		validateClientId,
 		validateTolerations,
+		validatePrice,
 	}
 )
 
@@ -226,6 +227,17 @@ func validatePriorityClasses(j *api.JobSubmitRequestItem, config configuration.S
 	return nil
 }
 
+// Ensures that if a request specifies a BidPrice, the price is non-negative
+func validatePrice(j *api.JobSubmitRequestItem, _ configuration.SubmissionConfig) error {
+	if j.ExperimentalPriceInfo == nil {
+		return nil
+	}
+	if j.ExperimentalPriceInfo.BidPrice < 0 {
+		return fmt.Errorf("price %.2f is invalid. Prices must be greater than or equal to zero", j.ExperimentalPriceInfo.BidPrice)
+	}
+	return nil
+}
+
 // Ensures that the JobSubmitRequestItem's limits and requests are equal.
 // Also checks that any resources defined are above minimum values set in config
 func validateResources(j *api.JobSubmitRequestItem, config configuration.SubmissionConfig) error {
diff --git a/internal/server/submit/validation/submit_request_test.go b/internal/server/submit/validation/submit_request_test.go
index 21f3dfa1bcb..8840e1cc275 100644
--- a/internal/server/submit/validation/submit_request_test.go
+++ b/internal/server/submit/validation/submit_request_test.go
@@ -836,6 +836,50 @@ func TestValidateQueue(t *testing.T) {
 	}
 }
 
+func TestValidatePrice(t *testing.T) {
+	tests := map[string]struct {
+		item          *api.JobSubmitRequestItem
+		expectSuccess bool
+	}{
+		"nil ExperimentalPriceInfo is ok": {
+			item:          &api.JobSubmitRequestItem{},
+			expectSuccess: true,
+		},
+		"zero price is ok": {
+			item: &api.JobSubmitRequestItem{
+				ExperimentalPriceInfo: &api.ExperimentalPriceInfo{},
+			},
+			expectSuccess: true,
+		},
+		"positive price is ok": {
+			item: &api.JobSubmitRequestItem{
+				ExperimentalPriceInfo: &api.ExperimentalPriceInfo{
+					BidPrice: 1.0,
+				},
+			},
+			expectSuccess: true,
+		},
+		"negative price is rejected": {
+			item: &api.JobSubmitRequestItem{
+				ExperimentalPriceInfo: &api.ExperimentalPriceInfo{
+					BidPrice: -1.0,
+				},
+			},
+			expectSuccess: false,
+		},
+	}
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			err := validatePrice(tc.item, configuration.SubmissionConfig{})
+			if tc.expectSuccess {
+				assert.NoError(t, err)
+			} else {
+				assert.Error(t, err)
+			}
+		})
+	}
+}
+
 func TestValidateResources(t *testing.T) {
 	oneCpu := v1.ResourceList{
 		v1.ResourceCPU: resource.MustParse("1"),
diff --git a/pkg/api/api.swagger.go b/pkg/api/api.swagger.go
index 239c355c8fa..43ad4e68d03 100644
--- a/pkg/api/api.swagger.go
+++ b/pkg/api/api.swagger.go
@@ -788,6 +788,15 @@ func SwaggerJsonTemplate() string {
 	" }\n" +
 	" }\n" +
 	" },\n" +
+	" \"apiExperimentalPriceInfo\": {\n" +
+	" \"type\": \"object\",\n" +
+	" \"properties\": {\n" +
+	" \"bidPrice\": {\n" +
+	" \"type\": \"number\",\n" +
+	" \"format\": \"double\"\n" +
+	" }\n" +
+	" }\n" +
+	" },\n" +
 	" \"apiIngressConfig\": {\n" +
 	" \"type\": \"object\",\n" +
 	" \"properties\": {\n" +
@@ -846,6 +855,9 @@ func SwaggerJsonTemplate() string {
 	" \"type\": \"string\",\n" +
 	" \"format\": \"date-time\"\n" +
 	" },\n" +
+	" \"experimentalPriceInfo\": {\n" +
+	" \"$ref\": \"#/definitions/apiExperimentalPriceInfo\"\n" +
+	" },\n" +
 	" \"id\": {\n" +
 	" \"type\": \"string\"\n" +
 	" },\n" +
@@ -905,11 +917,6 @@ func SwaggerJsonTemplate() string {
 	" \"type\": \"string\"\n" +
 	" }\n" +
 	" },\n" +
-	" \"queueTtlSeconds\": {\n" +
-	" \"description\": \"Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled.
Zero indicates an infinite lifetime.\",\n" + - " \"type\": \"string\",\n" + - " \"format\": \"int64\"\n" + - " },\n" + " \"requiredNodeLabels\": {\n" + " \"type\": \"object\",\n" + " \"additionalProperties\": {\n" + @@ -1734,6 +1741,9 @@ func SwaggerJsonTemplate() string { " \"clientId\": {\n" + " \"type\": \"string\"\n" + " },\n" + + " \"experimentalPriceInfo\": {\n" + + " \"$ref\": \"#/definitions/apiExperimentalPriceInfo\"\n" + + " },\n" + " \"ingress\": {\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + diff --git a/pkg/api/api.swagger.json b/pkg/api/api.swagger.json index f56ab357ce5..a4fc33ff558 100644 --- a/pkg/api/api.swagger.json +++ b/pkg/api/api.swagger.json @@ -777,6 +777,15 @@ } } }, + "apiExperimentalPriceInfo": { + "type": "object", + "properties": { + "bidPrice": { + "type": "number", + "format": "double" + } + } + }, "apiIngressConfig": { "type": "object", "properties": { @@ -835,6 +844,9 @@ "type": "string", "format": "date-time" }, + "experimentalPriceInfo": { + "$ref": "#/definitions/apiExperimentalPriceInfo" + }, "id": { "type": "string" }, @@ -894,11 +906,6 @@ "type": "string" } }, - "queueTtlSeconds": { - "description": "Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime.", - "type": "string", - "format": "int64" - }, "requiredNodeLabels": { "type": "object", "additionalProperties": { @@ -1723,6 +1730,9 @@ "clientId": { "type": "string" }, + "experimentalPriceInfo": { + "$ref": "#/definitions/apiExperimentalPriceInfo" + }, "ingress": { "type": "array", "items": { diff --git a/pkg/api/submit.pb.go b/pkg/api/submit.pb.go index 7bdb6b5b2c1..bb1e7e82503 100644 --- a/pkg/api/submit.pb.go +++ b/pkg/api/submit.pb.go @@ -146,7 +146,8 @@ type JobSubmitRequestItem struct { Services []*ServiceConfig `protobuf:"bytes,10,rep,name=services,proto3" json:"services,omitempty"` // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. - Scheduler string `protobuf:"bytes,11,opt,name=scheduler,proto3" json:"scheduler,omitempty"` + Scheduler string `protobuf:"bytes,11,opt,name=scheduler,proto3" json:"scheduler,omitempty"` + ExperimentalPriceInfo *ExperimentalPriceInfo `protobuf:"bytes,13,opt,name=experimentalPriceInfo,proto3" json:"experimentalPriceInfo,omitempty"` } func (m *JobSubmitRequestItem) Reset() { *m = JobSubmitRequestItem{} } @@ -261,6 +262,13 @@ func (m *JobSubmitRequestItem) GetScheduler() string { return "" } +func (m *JobSubmitRequestItem) GetExperimentalPriceInfo() *ExperimentalPriceInfo { + if m != nil { + return m.ExperimentalPriceInfo + } + return nil +} + type IngressConfig struct { Type IngressType `protobuf:"varint,1,opt,name=type,proto3,enum=api.IngressType" json:"type,omitempty"` // Deprecated: Do not use. Ports []uint32 `protobuf:"varint,2,rep,packed,name=ports,proto3" json:"ports,omitempty"` @@ -758,9 +766,8 @@ type Job struct { K8SService []*v1.Service `protobuf:"bytes,18,rep,name=k8s_service,json=k8sService,proto3" json:"k8sService,omitempty"` // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. - Scheduler string `protobuf:"bytes,20,opt,name=scheduler,proto3" json:"scheduler,omitempty"` - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. 
- QueueTtlSeconds int64 `protobuf:"varint,22,opt,name=queue_ttl_seconds,json=queueTtlSeconds,proto3" json:"queueTtlSeconds,omitempty"` + Scheduler string `protobuf:"bytes,20,opt,name=scheduler,proto3" json:"scheduler,omitempty"` + ExperimentalPriceInfo *ExperimentalPriceInfo `protobuf:"bytes,30,opt,name=experimentalPriceInfo,proto3" json:"experimentalPriceInfo,omitempty"` } func (m *Job) Reset() { *m = Job{} } @@ -945,9 +952,53 @@ func (m *Job) GetScheduler() string { return "" } -func (m *Job) GetQueueTtlSeconds() int64 { +func (m *Job) GetExperimentalPriceInfo() *ExperimentalPriceInfo { + if m != nil { + return m.ExperimentalPriceInfo + } + return nil +} + +type ExperimentalPriceInfo struct { + BidPrice float64 `protobuf:"fixed64,1,opt,name=bidPrice,proto3" json:"bidPrice,omitempty"` +} + +func (m *ExperimentalPriceInfo) Reset() { *m = ExperimentalPriceInfo{} } +func (m *ExperimentalPriceInfo) String() string { return proto.CompactTextString(m) } +func (*ExperimentalPriceInfo) ProtoMessage() {} +func (*ExperimentalPriceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e998bacb27df16c1, []int{9} +} +func (m *ExperimentalPriceInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExperimentalPriceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExperimentalPriceInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExperimentalPriceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExperimentalPriceInfo.Merge(m, src) +} +func (m *ExperimentalPriceInfo) XXX_Size() int { + return m.Size() +} +func (m *ExperimentalPriceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExperimentalPriceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ExperimentalPriceInfo proto.InternalMessageInfo + +func (m *ExperimentalPriceInfo) GetBidPrice() float64 { if m != nil { - return m.QueueTtlSeconds + return m.BidPrice } return 0 } @@ -964,7 +1015,7 @@ func (m *JobReprioritizeRequest) Reset() { *m = JobReprioritizeRequest{} func (m *JobReprioritizeRequest) String() string { return proto.CompactTextString(m) } func (*JobReprioritizeRequest) ProtoMessage() {} func (*JobReprioritizeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{9} + return fileDescriptor_e998bacb27df16c1, []int{10} } func (m *JobReprioritizeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1030,7 +1081,7 @@ func (m *JobReprioritizeResponse) Reset() { *m = JobReprioritizeResponse func (m *JobReprioritizeResponse) String() string { return proto.CompactTextString(m) } func (*JobReprioritizeResponse) ProtoMessage() {} func (*JobReprioritizeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{10} + return fileDescriptor_e998bacb27df16c1, []int{11} } func (m *JobReprioritizeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1075,7 +1126,7 @@ func (m *JobSubmitResponseItem) Reset() { *m = JobSubmitResponseItem{} } func (m *JobSubmitResponseItem) String() string { return proto.CompactTextString(m) } func (*JobSubmitResponseItem) ProtoMessage() {} func (*JobSubmitResponseItem) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{11} + return fileDescriptor_e998bacb27df16c1, []int{12} } func (m *JobSubmitResponseItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1127,7 +1178,7 @@ func (m 
*JobSubmitResponse) Reset() { *m = JobSubmitResponse{} } func (m *JobSubmitResponse) String() string { return proto.CompactTextString(m) } func (*JobSubmitResponse) ProtoMessage() {} func (*JobSubmitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{12} + return fileDescriptor_e998bacb27df16c1, []int{13} } func (m *JobSubmitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1186,7 +1237,7 @@ func (m *Queue) Reset() { *m = Queue{} } func (m *Queue) String() string { return proto.CompactTextString(m) } func (*Queue) ProtoMessage() {} func (*Queue) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{13} + return fileDescriptor_e998bacb27df16c1, []int{14} } func (m *Queue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1296,7 +1347,7 @@ func (m *Queue_Permissions) Reset() { *m = Queue_Permissions{} } func (m *Queue_Permissions) String() string { return proto.CompactTextString(m) } func (*Queue_Permissions) ProtoMessage() {} func (*Queue_Permissions) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{13, 0} + return fileDescriptor_e998bacb27df16c1, []int{14, 0} } func (m *Queue_Permissions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1348,7 +1399,7 @@ func (m *Queue_Permissions_Subject) Reset() { *m = Queue_Permissions_Sub func (m *Queue_Permissions_Subject) String() string { return proto.CompactTextString(m) } func (*Queue_Permissions_Subject) ProtoMessage() {} func (*Queue_Permissions_Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{13, 0, 0} + return fileDescriptor_e998bacb27df16c1, []int{14, 0, 0} } func (m *Queue_Permissions_Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1404,7 +1455,7 @@ func (m *PriorityClassResourceLimits) Reset() { *m = PriorityClassResour func (m *PriorityClassResourceLimits) String() string { return proto.CompactTextString(m) } func (*PriorityClassResourceLimits) ProtoMessage() {} func (*PriorityClassResourceLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{14} + return fileDescriptor_e998bacb27df16c1, []int{15} } func (m *PriorityClassResourceLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1455,7 +1506,7 @@ func (m *PriorityClassPoolResourceLimits) Reset() { *m = PriorityClassPo func (m *PriorityClassPoolResourceLimits) String() string { return proto.CompactTextString(m) } func (*PriorityClassPoolResourceLimits) ProtoMessage() {} func (*PriorityClassPoolResourceLimits) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{15} + return fileDescriptor_e998bacb27df16c1, []int{16} } func (m *PriorityClassPoolResourceLimits) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1500,7 +1551,7 @@ func (m *QueueList) Reset() { *m = QueueList{} } func (m *QueueList) String() string { return proto.CompactTextString(m) } func (*QueueList) ProtoMessage() {} func (*QueueList) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{16} + return fileDescriptor_e998bacb27df16c1, []int{17} } func (m *QueueList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1545,7 +1596,7 @@ func (m *CancellationResult) Reset() { *m = CancellationResult{} } func (m *CancellationResult) String() string { return proto.CompactTextString(m) } func (*CancellationResult) ProtoMessage() {} func (*CancellationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{17} + 
return fileDescriptor_e998bacb27df16c1, []int{18} } func (m *CancellationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1590,7 +1641,7 @@ func (m *QueueGetRequest) Reset() { *m = QueueGetRequest{} } func (m *QueueGetRequest) String() string { return proto.CompactTextString(m) } func (*QueueGetRequest) ProtoMessage() {} func (*QueueGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{18} + return fileDescriptor_e998bacb27df16c1, []int{19} } func (m *QueueGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1635,7 +1686,7 @@ func (m *QueueCordonRequest) Reset() { *m = QueueCordonRequest{} } func (m *QueueCordonRequest) String() string { return proto.CompactTextString(m) } func (*QueueCordonRequest) ProtoMessage() {} func (*QueueCordonRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{19} + return fileDescriptor_e998bacb27df16c1, []int{20} } func (m *QueueCordonRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1680,7 +1731,7 @@ func (m *QueueUncordonRequest) Reset() { *m = QueueUncordonRequest{} } func (m *QueueUncordonRequest) String() string { return proto.CompactTextString(m) } func (*QueueUncordonRequest) ProtoMessage() {} func (*QueueUncordonRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{20} + return fileDescriptor_e998bacb27df16c1, []int{21} } func (m *QueueUncordonRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1725,7 +1776,7 @@ func (m *StreamingQueueGetRequest) Reset() { *m = StreamingQueueGetReque func (m *StreamingQueueGetRequest) String() string { return proto.CompactTextString(m) } func (*StreamingQueueGetRequest) ProtoMessage() {} func (*StreamingQueueGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{21} + return fileDescriptor_e998bacb27df16c1, []int{22} } func (m *StreamingQueueGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1770,7 +1821,7 @@ func (m *QueueDeleteRequest) Reset() { *m = QueueDeleteRequest{} } func (m *QueueDeleteRequest) String() string { return proto.CompactTextString(m) } func (*QueueDeleteRequest) ProtoMessage() {} func (*QueueDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{22} + return fileDescriptor_e998bacb27df16c1, []int{23} } func (m *QueueDeleteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1867,7 @@ func (m *JobSetInfo) Reset() { *m = JobSetInfo{} } func (m *JobSetInfo) String() string { return proto.CompactTextString(m) } func (*JobSetInfo) ProtoMessage() {} func (*JobSetInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{23} + return fileDescriptor_e998bacb27df16c1, []int{24} } func (m *JobSetInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1875,7 +1926,7 @@ func (m *QueueUpdateResponse) Reset() { *m = QueueUpdateResponse{} } func (m *QueueUpdateResponse) String() string { return proto.CompactTextString(m) } func (*QueueUpdateResponse) ProtoMessage() {} func (*QueueUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{24} + return fileDescriptor_e998bacb27df16c1, []int{25} } func (m *QueueUpdateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1926,7 +1977,7 @@ func (m *BatchQueueUpdateResponse) Reset() { *m = BatchQueueUpdateRespon func (m *BatchQueueUpdateResponse) String() string { return proto.CompactTextString(m) } func 
(*BatchQueueUpdateResponse) ProtoMessage() {} func (*BatchQueueUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{25} + return fileDescriptor_e998bacb27df16c1, []int{26} } func (m *BatchQueueUpdateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1971,7 +2022,7 @@ func (m *QueueCreateResponse) Reset() { *m = QueueCreateResponse{} } func (m *QueueCreateResponse) String() string { return proto.CompactTextString(m) } func (*QueueCreateResponse) ProtoMessage() {} func (*QueueCreateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{26} + return fileDescriptor_e998bacb27df16c1, []int{27} } func (m *QueueCreateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2022,7 +2073,7 @@ func (m *BatchQueueCreateResponse) Reset() { *m = BatchQueueCreateRespon func (m *BatchQueueCreateResponse) String() string { return proto.CompactTextString(m) } func (*BatchQueueCreateResponse) ProtoMessage() {} func (*BatchQueueCreateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{27} + return fileDescriptor_e998bacb27df16c1, []int{28} } func (m *BatchQueueCreateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2066,7 +2117,7 @@ func (m *EndMarker) Reset() { *m = EndMarker{} } func (m *EndMarker) String() string { return proto.CompactTextString(m) } func (*EndMarker) ProtoMessage() {} func (*EndMarker) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{28} + return fileDescriptor_e998bacb27df16c1, []int{29} } func (m *EndMarker) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2106,7 +2157,7 @@ func (m *StreamingQueueMessage) Reset() { *m = StreamingQueueMessage{} } func (m *StreamingQueueMessage) String() string { return proto.CompactTextString(m) } func (*StreamingQueueMessage) ProtoMessage() {} func (*StreamingQueueMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{29} + return fileDescriptor_e998bacb27df16c1, []int{30} } func (m *StreamingQueueMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2189,7 +2240,7 @@ func (m *QueuePreemptRequest) Reset() { *m = QueuePreemptRequest{} } func (m *QueuePreemptRequest) String() string { return proto.CompactTextString(m) } func (*QueuePreemptRequest) ProtoMessage() {} func (*QueuePreemptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{30} + return fileDescriptor_e998bacb27df16c1, []int{31} } func (m *QueuePreemptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2243,7 +2294,7 @@ func (m *QueueCancelRequest) Reset() { *m = QueueCancelRequest{} } func (m *QueueCancelRequest) String() string { return proto.CompactTextString(m) } func (*QueueCancelRequest) ProtoMessage() {} func (*QueueCancelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{31} + return fileDescriptor_e998bacb27df16c1, []int{32} } func (m *QueueCancelRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2313,6 +2364,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "api.Job.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "api.Job.LabelsEntry") proto.RegisterMapType((map[string]string)(nil), "api.Job.RequiredNodeLabelsEntry") + proto.RegisterType((*ExperimentalPriceInfo)(nil), "api.ExperimentalPriceInfo") proto.RegisterType((*JobReprioritizeRequest)(nil), "api.JobReprioritizeRequest") 
proto.RegisterType((*JobReprioritizeResponse)(nil), "api.JobReprioritizeResponse") proto.RegisterMapType((map[string]string)(nil), "api.JobReprioritizeResponse.ReprioritizationResultsEntry") @@ -2350,204 +2402,207 @@ func init() { func init() { proto.RegisterFile("pkg/api/submit.proto", fileDescriptor_e998bacb27df16c1) } var fileDescriptor_e998bacb27df16c1 = []byte{ - // 3144 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x73, 0x1c, 0x47, - 0xd9, 0xd7, 0x68, 0xad, 0x8f, 0x7d, 0x56, 0x1f, 0xab, 0xb6, 0x2c, 0x8f, 0xd6, 0x8e, 0x56, 0x9e, - 0x24, 0x8e, 0xec, 0xf8, 0x5d, 0xc5, 0xca, 0x9b, 0xc2, 0x36, 0x01, 0xe3, 0x5d, 0xad, 0x6d, 0xc9, - 0xb6, 0xac, 0x48, 0x56, 0x3e, 0x28, 0x8a, 0x65, 0x76, 0xa7, 0xb5, 0x1a, 0x69, 0x77, 0x66, 0x33, - 0x33, 0x2b, 0x23, 0xa8, 0x5c, 0x28, 0x8a, 0x1c, 0xb8, 0xa4, 0xe0, 0x48, 0x15, 0xe1, 0xc0, 0x29, - 0x9c, 0xb9, 0x50, 0xfc, 0x01, 0x14, 0x5c, 0x42, 0x71, 0x81, 0xcb, 0x16, 0x95, 0x00, 0xa9, 0xda, - 0x1b, 0x77, 0x0e, 0x54, 0x3f, 0xdd, 0x33, 0xd3, 0xb3, 0x5f, 0xda, 0x95, 0xad, 0xe4, 0xc2, 0x4d, - 0xf3, 0xeb, 0xe7, 0xb3, 0xfb, 0xe9, 0xe7, 0xa3, 0x57, 0x30, 0x5b, 0x3b, 0x28, 0x2f, 0xeb, 0x35, - 0x73, 0xd9, 0xad, 0x17, 0xab, 0xa6, 0x97, 0xa9, 0x39, 0xb6, 0x67, 0x93, 0x98, 0x5e, 0x33, 0x53, - 0x17, 0xca, 0xb6, 0x5d, 0xae, 0xd0, 0x65, 0x84, 0x8a, 0xf5, 0xdd, 0x65, 0x5a, 0xad, 0x79, 0x47, - 0x9c, 0x22, 0x95, 0x6e, 0x5d, 0xf4, 0xcc, 0x2a, 0x75, 0x3d, 0xbd, 0x5a, 0x13, 0x04, 0xda, 0xc1, - 0x0d, 0x37, 0x63, 0xda, 0x28, 0xbb, 0x64, 0x3b, 0x74, 0xf9, 0xf0, 0xfa, 0x72, 0x99, 0x5a, 0xd4, - 0xd1, 0x3d, 0x6a, 0x08, 0x9a, 0x25, 0x89, 0xc6, 0xa2, 0xde, 0x53, 0xdb, 0x39, 0x30, 0xad, 0x72, - 0x27, 0xca, 0x8b, 0x42, 0x1d, 0xa3, 0xd4, 0x2d, 0xcb, 0xf6, 0x74, 0xcf, 0xb4, 0x2d, 0x57, 0xac, - 0x06, 0x4e, 0xec, 0x51, 0xbd, 0xe2, 0xed, 0x71, 0x54, 0xfb, 0x38, 0x0e, 0xb3, 0xeb, 0x76, 0x71, - 0x1b, 0x1d, 0xdb, 0xa2, 0xef, 0xd7, 0xa9, 0xeb, 0xad, 0x79, 0xb4, 0x4a, 0x56, 0x60, 0xbc, 0xe6, - 0x98, 0xb6, 0x63, 0x7a, 0x47, 0xaa, 0xb2, 0xa8, 0x2c, 0x29, 0xd9, 0xb9, 0x66, 0x23, 0x4d, 0x7c, - 0xec, 0x9a, 0x5d, 0x35, 0x3d, 0xf4, 0x75, 0x2b, 0xa0, 0x23, 0x6f, 0x40, 0xdc, 0xd2, 0xab, 0xd4, - 0xad, 0xe9, 0x25, 0xaa, 0xc6, 0x16, 0x95, 0xa5, 0x78, 0xf6, 0x7c, 0xb3, 0x91, 0x3e, 0x1b, 0x80, - 0x12, 0x57, 0x48, 0x49, 0x5e, 0x87, 0x78, 0xa9, 0x62, 0x52, 0xcb, 0x2b, 0x98, 0x86, 0x3a, 0x8e, - 0x6c, 0xa8, 0x8b, 0x83, 0x6b, 0x86, 0xac, 0xcb, 0xc7, 0xc8, 0x36, 0x8c, 0x56, 0xf4, 0x22, 0xad, - 0xb8, 0xea, 0x99, 0xc5, 0xd8, 0x52, 0x62, 0xe5, 0xe5, 0x8c, 0x5e, 0x33, 0x33, 0x9d, 0x5c, 0xc9, - 0x3c, 0x44, 0xba, 0xbc, 0xe5, 0x39, 0x47, 0xd9, 0xd9, 0x66, 0x23, 0x9d, 0xe4, 0x8c, 0x92, 0x58, - 0x21, 0x8a, 0x94, 0x21, 0x21, 0x6d, 0x9c, 0x3a, 0x82, 0x92, 0xaf, 0x76, 0x97, 0x7c, 0x27, 0x24, - 0xe6, 0xe2, 0xe7, 0x9b, 0x8d, 0xf4, 0x39, 0x49, 0x84, 0xa4, 0x43, 0x96, 0x4c, 0x3e, 0x54, 0x60, - 0xd6, 0xa1, 0xef, 0xd7, 0x4d, 0x87, 0x1a, 0x05, 0xcb, 0x36, 0x68, 0x41, 0x38, 0x33, 0x8a, 0x2a, - 0xaf, 0x77, 0x57, 0xb9, 0x25, 0xb8, 0x36, 0x6c, 0x83, 0xca, 0x8e, 0x69, 0xcd, 0x46, 0xfa, 0xa2, - 0xd3, 0xb6, 0x18, 0x1a, 0xa0, 0x2a, 0x5b, 0xa4, 0x7d, 0x9d, 0x3c, 0x86, 0xf1, 0x9a, 0x6d, 0x14, - 0xdc, 0x1a, 0x2d, 0xa9, 0xc3, 0x8b, 0xca, 0x52, 0x62, 0xe5, 0x42, 0x86, 0x47, 0x1c, 0xda, 0xc0, - 0xa2, 0x32, 0x73, 0x78, 0x3d, 0xb3, 0x69, 0x1b, 0xdb, 0x35, 0x5a, 0xc2, 0xf3, 0x9c, 0xa9, 0xf1, - 0x8f, 0x88, 0xec, 0x31, 0x01, 0x92, 0x4d, 0x88, 0xfb, 0x02, 0x5d, 0x75, 0x0c, 0xdd, 0xe9, 0x29, - 0x91, 0x87, 0x15, 0xff, 0x70, 0x23, 0x61, 0x25, 0x30, 0x92, 0x83, 0x31, 0xd3, 0x2a, 0x3b, 0xd4, 
- 0x75, 0xd5, 0x38, 0xca, 0x23, 0x28, 0x68, 0x8d, 0x63, 0x39, 0xdb, 0xda, 0x35, 0xcb, 0xd9, 0x73, - 0xcc, 0x30, 0x41, 0x26, 0x49, 0xf1, 0x39, 0xc9, 0x5d, 0x18, 0x77, 0xa9, 0x73, 0x68, 0x96, 0xa8, - 0xab, 0x82, 0x24, 0x65, 0x9b, 0x83, 0x42, 0x0a, 0x1a, 0xe3, 0xd3, 0xc9, 0xc6, 0xf8, 0x18, 0x8b, - 0x71, 0xb7, 0xb4, 0x47, 0x8d, 0x7a, 0x85, 0x3a, 0x6a, 0x22, 0x8c, 0xf1, 0x00, 0x94, 0x63, 0x3c, - 0x00, 0x53, 0x3a, 0x24, 0xa4, 0xd3, 0x22, 0x2f, 0x42, 0xec, 0x80, 0xf2, 0x8b, 0x15, 0xcf, 0xce, - 0x34, 0x1b, 0xe9, 0xc9, 0x03, 0x2a, 0xdf, 0x29, 0xb6, 0x4a, 0xae, 0xc0, 0xc8, 0xa1, 0x5e, 0xa9, - 0x53, 0x3c, 0x97, 0x78, 0xf6, 0x6c, 0xb3, 0x91, 0x9e, 0x46, 0x40, 0x22, 0xe4, 0x14, 0xb7, 0x86, - 0x6f, 0x28, 0xa9, 0x5d, 0x48, 0xb6, 0xc6, 0xe3, 0xa9, 0xe8, 0xa9, 0xc2, 0xf9, 0x2e, 0x41, 0x78, - 0x1a, 0xea, 0xb4, 0x7f, 0xc7, 0x60, 0x32, 0x72, 0xd4, 0xe4, 0x16, 0x9c, 0xf1, 0x8e, 0x6a, 0x14, - 0xd5, 0x4c, 0xad, 0x24, 0xe5, 0x60, 0x78, 0x72, 0x54, 0xa3, 0x78, 0xc7, 0xa7, 0x18, 0x45, 0x24, - 0x40, 0x91, 0x87, 0x29, 0xaf, 0xd9, 0x8e, 0xe7, 0xaa, 0xc3, 0x8b, 0xb1, 0xa5, 0x49, 0xae, 0x1c, - 0x01, 0x59, 0x39, 0x02, 0xe4, 0x7b, 0xd1, 0x64, 0x10, 0xc3, 0xa0, 0x79, 0xb1, 0x3d, 0xf4, 0x4e, - 0x9e, 0x05, 0x6e, 0x42, 0xc2, 0xab, 0xb8, 0x05, 0x6a, 0xe9, 0xc5, 0x0a, 0x35, 0xd4, 0x33, 0x8b, - 0xca, 0xd2, 0x78, 0x56, 0x6d, 0x36, 0xd2, 0xb3, 0x1e, 0xdb, 0x51, 0x44, 0x25, 0x5e, 0x08, 0x51, - 0xcc, 0x99, 0xd4, 0xf1, 0x0a, 0x2c, 0x8b, 0xaa, 0x23, 0x52, 0xce, 0xa4, 0x8e, 0xb7, 0xa1, 0x57, - 0x69, 0x24, 0x67, 0x0a, 0x8c, 0xdc, 0x86, 0xc9, 0xba, 0x4b, 0x0b, 0xa5, 0x4a, 0xdd, 0xf5, 0xa8, - 0xb3, 0xb6, 0xa9, 0x8e, 0xa2, 0xc6, 0x54, 0xb3, 0x91, 0x9e, 0xab, 0xbb, 0x34, 0xe7, 0xe3, 0x12, - 0xf3, 0x84, 0x8c, 0x7f, 0x59, 0x21, 0xa6, 0x79, 0x30, 0x19, 0xb9, 0x97, 0xe4, 0x46, 0x87, 0x23, - 0x17, 0x14, 0x78, 0xe4, 0xa4, 0xfd, 0xc8, 0x07, 0x3e, 0x70, 0xed, 0x6f, 0x0a, 0x24, 0x5b, 0x73, - 0x2e, 0xe3, 0x7f, 0xbf, 0x4e, 0xeb, 0x54, 0x38, 0x88, 0xfc, 0x08, 0xc8, 0xfc, 0x08, 0x90, 0xff, - 0x07, 0xd8, 0xb7, 0x8b, 0x05, 0x97, 0x62, 0x21, 0x1b, 0x0e, 0x0f, 0x65, 0xdf, 0x2e, 0x6e, 0xd3, - 0x96, 0x42, 0xe6, 0x63, 0xc4, 0x80, 0x19, 0xc6, 0xe5, 0x70, 0x7d, 0x05, 0x46, 0xe0, 0x07, 0xdb, - 0x7c, 0xd7, 0x32, 0x90, 0x7d, 0xa1, 0xd9, 0x48, 0xcf, 0xef, 0xdb, 0x45, 0x09, 0x93, 0x3d, 0x9a, - 0x6e, 0x59, 0xd2, 0xfe, 0xac, 0xc0, 0xcc, 0xba, 0x5d, 0xdc, 0x74, 0x28, 0x23, 0xf8, 0xd2, 0x9c, - 0xfb, 0x3f, 0x18, 0x63, 0x5c, 0xa6, 0xc1, 0x5d, 0x8a, 0xf3, 0xfa, 0xbb, 0x6f, 0x17, 0xd7, 0x8c, - 0x48, 0xfd, 0xe5, 0x08, 0xb9, 0x06, 0xa3, 0x0e, 0xd5, 0x5d, 0xdb, 0xc2, 0xbb, 0x20, 0xa8, 0x39, - 0x22, 0x53, 0x73, 0x44, 0xfb, 0x0f, 0x3f, 0xaf, 0x9c, 0x6e, 0x95, 0x68, 0xc5, 0x77, 0xe9, 0x2a, - 0x8c, 0x72, 0x8d, 0xb2, 0x4f, 0x28, 0x5e, 0xf6, 0x09, 0x81, 0x13, 0xfa, 0x14, 0x6c, 0x5a, 0xec, - 0xd8, 0x4d, 0x93, 0xdc, 0x3f, 0x33, 0x90, 0xfb, 0x23, 0x7d, 0xb8, 0xff, 0x4f, 0x05, 0xce, 0xae, - 0xa3, 0x51, 0xd1, 0x1d, 0x88, 0x7a, 0xa5, 0x0c, 0xea, 0xd5, 0xf0, 0xb1, 0x5e, 0xdd, 0x86, 0xd1, - 0x5d, 0xb3, 0xe2, 0x51, 0x07, 0x77, 0x20, 0xb1, 0x32, 0x13, 0x84, 0x29, 0xf5, 0xee, 0xe2, 0x02, - 0xb7, 0x9c, 0x13, 0xc9, 0x96, 0x73, 0x64, 0xc0, 0x63, 0x7e, 0x00, 0x13, 0xb2, 0x6c, 0xf2, 0x75, - 0x18, 0x75, 0x3d, 0xdd, 0xa3, 0xae, 0xaa, 0x2c, 0xc6, 0x96, 0xa6, 0x56, 0x26, 0x03, 0xf5, 0x0c, - 0xe5, 0xc2, 0x38, 0x81, 0x2c, 0x8c, 0x23, 0xda, 0x17, 0xd3, 0x10, 0x5b, 0xb7, 0x8b, 0x64, 0x11, - 0x86, 0x83, 0xcd, 0x49, 0x36, 0x1b, 0xe9, 0x09, 0x53, 0xde, 0x96, 0x61, 0xd3, 0x88, 0x76, 0xa5, - 0x93, 0x7d, 0x76, 0xa5, 0xa7, 0x1e, 0x51, 0x91, 0x16, 0x7b, 0xac, 0xef, 0x16, 0x3b, 0x1b, 0x74, - 0xcb, 0xbc, 0x83, 0x9a, 
0xf5, 0xf7, 0x6c, 0x80, 0xe6, 0xf8, 0xed, 0x68, 0x3d, 0x84, 0x68, 0x8a, - 0x3a, 0x79, 0x15, 0x3c, 0xec, 0xd2, 0x0a, 0x27, 0x50, 0xc1, 0x62, 0xa0, 0xe0, 0x79, 0x77, 0xbe, - 0x57, 0x60, 0xc4, 0x7e, 0x6a, 0x51, 0x47, 0x8c, 0x1c, 0xb8, 0xeb, 0x08, 0xc8, 0xbb, 0x8e, 0x00, - 0xa1, 0x70, 0x01, 0xb7, 0xbf, 0x80, 0x9f, 0xee, 0x9e, 0x59, 0x2b, 0xd4, 0x5d, 0xea, 0x14, 0xca, - 0x8e, 0x5d, 0xaf, 0xb9, 0xea, 0x34, 0xde, 0xed, 0xcb, 0xcd, 0x46, 0x5a, 0x43, 0xb2, 0xc7, 0x3e, - 0xd5, 0x8e, 0x4b, 0x9d, 0x7b, 0x48, 0x23, 0xc9, 0x54, 0xbb, 0xd1, 0x90, 0x1f, 0x2b, 0x70, 0xb9, - 0x64, 0x57, 0x6b, 0xac, 0xb7, 0xa0, 0x46, 0xa1, 0x97, 0xca, 0xb3, 0x8b, 0xca, 0xd2, 0x44, 0xf6, - 0xb5, 0x66, 0x23, 0x7d, 0x2d, 0xe4, 0x78, 0xeb, 0x78, 0xe5, 0xda, 0xf1, 0xd4, 0x91, 0xd1, 0xef, - 0x4c, 0x9f, 0xa3, 0x9f, 0x3c, 0x46, 0x8c, 0x3c, 0xf7, 0x31, 0x62, 0xe2, 0x79, 0x8c, 0x11, 0xbf, - 0x52, 0x60, 0x51, 0x34, 0xe4, 0xa6, 0x55, 0x2e, 0x38, 0xd4, 0xb5, 0xeb, 0x4e, 0x89, 0x16, 0x44, - 0x68, 0x54, 0xa9, 0xe5, 0xb9, 0xea, 0x39, 0xb4, 0x7d, 0xa9, 0x93, 0xa6, 0x2d, 0xc1, 0xb0, 0x25, - 0xd1, 0x67, 0xaf, 0x35, 0x1b, 0xe9, 0xa5, 0x50, 0x6a, 0x27, 0x1a, 0xc9, 0x98, 0x85, 0xde, 0x94, - 0xe4, 0x01, 0x8c, 0x95, 0x1c, 0xca, 0x46, 0x7a, 0x6c, 0xcd, 0x12, 0x2b, 0xa9, 0x0c, 0x9f, 0xe9, - 0x33, 0xfe, 0x13, 0x42, 0xe6, 0x89, 0xff, 0x84, 0xc0, 0x27, 0x1e, 0x41, 0x2e, 0x4f, 0x3c, 0x02, - 0x92, 0xc7, 0xa6, 0xa9, 0xe7, 0x32, 0x36, 0x25, 0x9f, 0x61, 0x6c, 0xfa, 0x0e, 0x24, 0x0e, 0x6e, - 0xb8, 0x05, 0xdf, 0xa0, 0x19, 0x14, 0x75, 0x49, 0xde, 0xe6, 0xf0, 0x6d, 0x83, 0x6d, 0xb6, 0xb0, - 0x92, 0x77, 0xc3, 0x07, 0x37, 0xdc, 0xb5, 0x36, 0x13, 0x21, 0x44, 0x59, 0x6a, 0x62, 0xd2, 0x85, - 0x36, 0x95, 0x74, 0x0f, 0x17, 0x61, 0x77, 0x20, 0x57, 0x7c, 0xb7, 0xc8, 0x15, 0x68, 0x74, 0xd8, - 0x9b, 0xed, 0x77, 0xd8, 0x23, 0x6b, 0x30, 0xc3, 0xef, 0xae, 0xe7, 0x55, 0x0a, 0x2e, 0x2d, 0xd9, - 0x96, 0xe1, 0xaa, 0x73, 0x8b, 0xca, 0x52, 0x8c, 0xf7, 0x6d, 0xb8, 0xf8, 0xc4, 0xab, 0x6c, 0xf3, - 0x25, 0xb9, 0x6f, 0x6b, 0x59, 0xfa, 0xdf, 0xdc, 0x78, 0xe2, 0x19, 0xe2, 0x5f, 0x0a, 0xcc, 0xad, - 0xb3, 0x2e, 0x58, 0xe4, 0x28, 0xf3, 0x07, 0xd4, 0xef, 0x90, 0xa4, 0xb6, 0x4c, 0xe9, 0xa3, 0x2d, - 0x3b, 0xf5, 0xa2, 0xfe, 0x26, 0x4c, 0x58, 0xf4, 0x69, 0xa1, 0x25, 0xe9, 0x62, 0xfd, 0xb4, 0xe8, - 0xd3, 0xcd, 0xf6, 0xbc, 0x9b, 0x90, 0x60, 0xed, 0x37, 0xc3, 0x70, 0xbe, 0xcd, 0x51, 0xb7, 0x66, - 0x5b, 0x2e, 0x25, 0xbf, 0x50, 0x40, 0x75, 0xc2, 0x05, 0x3c, 0x62, 0x96, 0xf9, 0xea, 0x15, 0x8f, - 0xfb, 0x9e, 0x58, 0xb9, 0xe9, 0x17, 0xd8, 0x4e, 0x02, 0x32, 0x5b, 0x2d, 0xcc, 0x5b, 0x9c, 0x97, - 0x57, 0xde, 0x97, 0x9b, 0x8d, 0xf4, 0x25, 0xa7, 0x33, 0x85, 0x64, 0xed, 0xf9, 0x2e, 0x24, 0x29, - 0x07, 0x2e, 0xf6, 0x92, 0x7f, 0x2a, 0x61, 0x61, 0xc1, 0x39, 0x69, 0xa0, 0xe2, 0x5e, 0xe2, 0x83, - 0xe7, 0x20, 0x83, 0xc3, 0x15, 0x18, 0xa1, 0x8e, 0x63, 0x3b, 0xb2, 0x4e, 0x04, 0x64, 0x52, 0x04, - 0xb4, 0x0f, 0x70, 0xee, 0x8a, 0xea, 0x23, 0x7b, 0x40, 0xf8, 0xcc, 0xc7, 0xbf, 0xc5, 0xd0, 0xc7, - 0xcf, 0x23, 0xd5, 0x3a, 0xf4, 0x85, 0x36, 0x66, 0x17, 0x9a, 0x8d, 0x74, 0x0a, 0x47, 0xbb, 0x10, - 0x94, 0x77, 0x3a, 0xd9, 0xba, 0xa6, 0x7d, 0x98, 0x80, 0x11, 0x2c, 0xf4, 0xe4, 0x32, 0x9c, 0xc1, - 0xc7, 0x02, 0xee, 0x1d, 0x0e, 0xcc, 0x56, 0xf4, 0xa1, 0x00, 0xd7, 0x49, 0x1e, 0xa6, 0xfd, 0x40, - 0x2c, 0xec, 0xea, 0x25, 0x4f, 0x78, 0xa9, 0x64, 0x2f, 0x36, 0x1b, 0x69, 0xd5, 0x5f, 0xba, 0x8b, - 0x2b, 0x12, 0xf3, 0x54, 0x74, 0x85, 0xdc, 0x84, 0x04, 0xf6, 0x2b, 0xbc, 0x7d, 0x11, 0xd3, 0x1f, - 0x66, 0x5d, 0x06, 0xf3, 0xb6, 0x43, 0xce, 0xba, 0x21, 0xca, 0xae, 0x03, 0x76, 0x39, 0x3e, 0x2f, - 0x1f, 0x9d, 0xf0, 0x3a, 0x20, 0xde, 0xc6, 0x9c, 
0x90, 0x60, 0x52, 0x86, 0xe9, 0xa0, 0xb4, 0x57, - 0xcc, 0xaa, 0xe9, 0xf9, 0xef, 0xb8, 0x0b, 0xb8, 0xb1, 0xb8, 0x19, 0x41, 0x2d, 0x7f, 0x88, 0x04, - 0x3c, 0x9a, 0xd9, 0xe6, 0xaa, 0x4e, 0x64, 0x21, 0xd2, 0x9a, 0x4c, 0x45, 0xd7, 0xc8, 0x6f, 0x15, - 0xb8, 0xdc, 0xa2, 0xa9, 0x50, 0x3c, 0x0a, 0x6e, 0x71, 0xa1, 0x54, 0xd1, 0x5d, 0x97, 0x3f, 0xd0, - 0x8c, 0x49, 0xaf, 0xba, 0x9d, 0x0c, 0xc8, 0x1e, 0xf9, 0xb7, 0x39, 0xc7, 0x98, 0x36, 0xf4, 0x2a, - 0xe5, 0x36, 0x2d, 0x37, 0x1b, 0xe9, 0x57, 0x9d, 0xe3, 0x68, 0xa5, 0xad, 0xb8, 0x74, 0x2c, 0x31, - 0xd9, 0x86, 0x44, 0x8d, 0x3a, 0x55, 0xd3, 0x75, 0xb1, 0x8f, 0xe7, 0x2f, 0xce, 0x73, 0x92, 0x6d, - 0x9b, 0xe1, 0x2a, 0xdf, 0x75, 0x89, 0x5c, 0xde, 0x75, 0x09, 0x66, 0x3d, 0x63, 0xc9, 0x76, 0x0c, - 0xdb, 0xa2, 0xfc, 0x09, 0x7f, 0x5c, 0x0c, 0x4b, 0x02, 0x8b, 0x0c, 0x4b, 0x02, 0x23, 0x8f, 0x60, - 0x86, 0xb7, 0xfa, 0x05, 0x83, 0xd6, 0x1c, 0x5a, 0xc2, 0xbe, 0x27, 0x8e, 0x87, 0xbd, 0xc8, 0x02, - 0x9d, 0x2f, 0xae, 0x06, 0x6b, 0x91, 0xd3, 0x48, 0xb6, 0xae, 0x92, 0xd5, 0x60, 0xc6, 0x81, 0x36, - 0x97, 0xfa, 0x9e, 0x72, 0x52, 0x5f, 0x28, 0x90, 0x90, 0x36, 0x80, 0x6c, 0xc1, 0xb8, 0x5b, 0x2f, - 0xee, 0xd3, 0x52, 0x90, 0x30, 0x17, 0x3a, 0x6f, 0x55, 0x66, 0x9b, 0x93, 0x89, 0x66, 0x48, 0xf0, - 0x44, 0x9a, 0x21, 0x81, 0x61, 0xca, 0xa2, 0x4e, 0x91, 0xbf, 0x49, 0xf9, 0x29, 0x8b, 0x01, 0x91, - 0x94, 0xc5, 0x80, 0xd4, 0x7b, 0x30, 0x26, 0xe4, 0xb2, 0x0b, 0x7c, 0x60, 0x5a, 0x86, 0x7c, 0x81, - 0xd9, 0xb7, 0x7c, 0x81, 0xd9, 0x77, 0x70, 0xd1, 0x87, 0x7b, 0x5f, 0xf4, 0x94, 0x09, 0x67, 0x3b, - 0x5c, 0x83, 0x13, 0x24, 0x5d, 0xe5, 0xd8, 0xd2, 0xff, 0x4b, 0x05, 0x2e, 0xf7, 0x17, 0xf1, 0xfd, - 0xa9, 0x7f, 0x20, 0xab, 0xf7, 0x67, 0xc4, 0x88, 0xc0, 0x16, 0x6d, 0xc7, 0x19, 0x78, 0xfa, 0x6d, - 0x96, 0xf6, 0xb3, 0x11, 0xb8, 0xd0, 0xc3, 0x44, 0x36, 0x9e, 0xcc, 0x57, 0xf5, 0xef, 0x9b, 0xd5, - 0x7a, 0x35, 0x9c, 0x4d, 0x76, 0x1d, 0xbd, 0xc4, 0xca, 0xa2, 0x08, 0xbd, 0x6f, 0x1c, 0xe7, 0x68, - 0xe6, 0x11, 0x97, 0xe0, 0xa3, 0x77, 0x05, 0xbf, 0x54, 0xaf, 0xab, 0x9d, 0x29, 0xe4, 0x7a, 0xdd, - 0x85, 0x84, 0xfc, 0x4e, 0x81, 0x4b, 0x5d, 0x4d, 0xc4, 0xdc, 0x67, 0xdb, 0x15, 0x0c, 0xea, 0xc4, - 0x4a, 0xee, 0xa4, 0xa6, 0x66, 0x8f, 0x36, 0x6d, 0xbb, 0xc2, 0x0d, 0x7e, 0xb5, 0xd9, 0x48, 0xbf, - 0x52, 0xed, 0x45, 0x27, 0x99, 0xfd, 0x42, 0x4f, 0x42, 0xd6, 0x6c, 0xf4, 0xda, 0x9c, 0xd3, 0x8a, - 0x7b, 0xed, 0x78, 0x37, 0xfb, 0x53, 0xfd, 0x38, 0x1a, 0xf3, 0x2f, 0xb5, 0xef, 0x2f, 0x13, 0x38, - 0x58, 0xdc, 0x6b, 0xbf, 0x1f, 0x86, 0xf4, 0x31, 0x32, 0xc8, 0xaf, 0xfb, 0x08, 0xcc, 0x3b, 0xfd, - 0x58, 0x73, 0xaa, 0xc1, 0xf9, 0x55, 0x9c, 0xaf, 0x96, 0x87, 0x38, 0xd6, 0x81, 0x87, 0xa6, 0xeb, - 0x91, 0x1b, 0x30, 0x8a, 0xed, 0xbc, 0x5f, 0x27, 0x20, 0xac, 0x13, 0xbc, 0xe6, 0xf0, 0x55, 0xb9, - 0xe6, 0x70, 0x44, 0xdb, 0x01, 0xc2, 0x9f, 0x70, 0x2b, 0x52, 0x0f, 0x4c, 0x6e, 0xc3, 0x64, 0x89, - 0xa3, 0xd4, 0x90, 0x66, 0x15, 0xfc, 0xb5, 0x26, 0x58, 0x88, 0x4e, 0x2c, 0x13, 0x32, 0xae, 0xdd, - 0x84, 0x69, 0xd4, 0x7e, 0x8f, 0x06, 0x0f, 0xfe, 0x7d, 0x36, 0x81, 0xda, 0x9b, 0x40, 0x90, 0x35, - 0x87, 0xb5, 0x7a, 0x50, 0xee, 0x6f, 0xc2, 0x2c, 0x72, 0xef, 0x58, 0xa5, 0x13, 0xf1, 0xdf, 0x06, - 0x75, 0xdb, 0x73, 0xa8, 0x5e, 0x35, 0xad, 0x72, 0xab, 0x07, 0x2f, 0x42, 0xcc, 0xaa, 0x57, 0x51, - 0xc4, 0x24, 0x3f, 0x46, 0xab, 0x5e, 0x95, 0x8f, 0xd1, 0xaa, 0x57, 0x03, 0xf3, 0x57, 0x69, 0x85, - 0x7a, 0x74, 0x50, 0xf5, 0x9f, 0x28, 0x00, 0xfc, 0xc5, 0x79, 0xcd, 0xda, 0xb5, 0xfb, 0x6e, 0x9c, - 0x6f, 0x42, 0x02, 0xcf, 0xd3, 0x28, 0xec, 0xdb, 0x58, 0xdb, 0x95, 0xa5, 0x11, 0xde, 0xf1, 0x72, - 0x78, 0xdd, 0x8e, 0x14, 0x78, 0x08, 0x51, 0xc6, 0x5a, 0xa1, 0xba, 0xeb, 
0xb3, 0xc6, 0x42, 0x56, - 0x0e, 0xb7, 0xb2, 0x86, 0xa8, 0xf6, 0x14, 0xce, 0xf2, 0xbd, 0xae, 0x19, 0xba, 0x17, 0x0e, 0x7e, - 0x6f, 0xc8, 0xbf, 0xec, 0x44, 0x63, 0xb1, 0xd7, 0x24, 0x3a, 0xc0, 0x60, 0x53, 0x07, 0x35, 0xab, - 0x7b, 0xa5, 0xbd, 0x4e, 0xda, 0xdf, 0x83, 0xc9, 0x5d, 0xdd, 0xac, 0xf8, 0x6f, 0x98, 0xfe, 0x8d, - 0x50, 0x43, 0x2b, 0xa2, 0x0c, 0x3c, 0xa8, 0x39, 0xcb, 0x5b, 0xad, 0xb7, 0x64, 0x42, 0xc6, 0x03, - 0x7f, 0x73, 0xf8, 0xca, 0xf5, 0x55, 0xf9, 0xdb, 0xa2, 0xfd, 0x78, 0x7f, 0xa3, 0x0c, 0x03, 0xf8, - 0x9b, 0x80, 0x78, 0xde, 0x32, 0x1e, 0xe9, 0xce, 0x01, 0x75, 0xb4, 0x8f, 0x14, 0x38, 0x17, 0xbd, - 0x19, 0x8f, 0xa8, 0xeb, 0xea, 0x65, 0x4a, 0xbe, 0x36, 0x98, 0xff, 0xf7, 0x87, 0xc2, 0x1f, 0x14, - 0x62, 0xd4, 0x32, 0x44, 0x51, 0x99, 0x42, 0xb6, 0x40, 0x1f, 0xbf, 0x5f, 0x54, 0xee, 0x31, 0xef, - 0x0f, 0x6d, 0x31, 0xfa, 0xec, 0x18, 0x8c, 0xd0, 0x43, 0x6a, 0x79, 0xda, 0x4f, 0x14, 0x71, 0x20, - 0x2d, 0x3f, 0x2d, 0xf6, 0x7b, 0x6b, 0xee, 0x85, 0xe3, 0x26, 0x96, 0x0d, 0xea, 0x77, 0xc5, 0xf8, - 0x52, 0xd6, 0xb2, 0x24, 0xbf, 0x94, 0xb5, 0x2c, 0x69, 0x7f, 0x52, 0xfc, 0x9c, 0x15, 0xf9, 0x35, - 0xec, 0xcb, 0xb6, 0x83, 0xac, 0x42, 0x7c, 0x5f, 0xfc, 0x16, 0xc5, 0xc7, 0xde, 0xb6, 0x5f, 0xa8, - 0xf0, 0x09, 0x31, 0xa0, 0x91, 0x9f, 0x10, 0x03, 0xf0, 0x6a, 0x0a, 0x12, 0xd2, 0xbf, 0x34, 0x90, - 0x04, 0x8c, 0x89, 0xcf, 0xe4, 0xd0, 0xd5, 0x2b, 0x90, 0x90, 0x7e, 0xfb, 0x26, 0x13, 0x30, 0xbe, - 0x61, 0x1b, 0x74, 0xd3, 0x76, 0xbc, 0xe4, 0x10, 0xfb, 0xba, 0x4f, 0x75, 0xa3, 0xc2, 0x48, 0x95, - 0xab, 0x1f, 0x2b, 0x30, 0xee, 0xeb, 0x25, 0x00, 0xa3, 0x6f, 0xed, 0xe4, 0x77, 0xf2, 0xab, 0xc9, - 0x21, 0x26, 0x70, 0x33, 0xbf, 0xb1, 0xba, 0xb6, 0x71, 0x2f, 0xa9, 0xb0, 0x8f, 0xad, 0x9d, 0x8d, - 0x0d, 0xf6, 0x31, 0x4c, 0x26, 0x21, 0xbe, 0xbd, 0x93, 0xcb, 0xe5, 0xf3, 0xab, 0xf9, 0xd5, 0x64, - 0x8c, 0x31, 0xdd, 0xbd, 0xb3, 0xf6, 0x30, 0xbf, 0x9a, 0x3c, 0xc3, 0xe8, 0x76, 0x36, 0x1e, 0x6c, - 0x3c, 0x7e, 0x67, 0x23, 0x39, 0xc2, 0xe9, 0xb2, 0x8f, 0xd6, 0x9e, 0x3c, 0xc9, 0xaf, 0x26, 0x47, - 0x19, 0xdd, 0xc3, 0xfc, 0x9d, 0xed, 0xfc, 0x6a, 0x72, 0x8c, 0x2d, 0x6d, 0x6e, 0xe5, 0xf3, 0x8f, - 0x36, 0xd9, 0xd2, 0x38, 0xfb, 0xcc, 0xdd, 0xd9, 0xc8, 0xe5, 0x1f, 0x32, 0x29, 0x71, 0x66, 0xe1, - 0x56, 0x7e, 0x3d, 0x9f, 0x63, 0x8b, 0xb0, 0xf2, 0xc7, 0x11, 0x98, 0xc0, 0x63, 0xf3, 0xdf, 0x5c, - 0x5f, 0x87, 0x04, 0xbf, 0x2c, 0xfc, 0xd9, 0x42, 0x8a, 0xe4, 0xd4, 0x5c, 0xdb, 0x6b, 0x78, 0x9e, - 0xed, 0x9b, 0x36, 0x44, 0x6e, 0xc3, 0x84, 0xc4, 0xe4, 0x92, 0xa9, 0x90, 0x8b, 0xd5, 0xe6, 0xd4, - 0x0b, 0xf8, 0xdd, 0xed, 0xfe, 0x6a, 0x43, 0x4c, 0x2b, 0x4f, 0x49, 0x03, 0x6a, 0x95, 0x98, 0x8e, - 0xd7, 0x1a, 0x4d, 0x7a, 0xda, 0x10, 0xf9, 0x16, 0x24, 0x78, 0x89, 0xe2, 0x5a, 0xcf, 0x87, 0xfc, - 0x91, 0xca, 0xd5, 0xc3, 0x84, 0x0c, 0x8c, 0xdf, 0xa3, 0x1e, 0x67, 0x9f, 0x0d, 0xd9, 0xc3, 0x82, - 0x99, 0x92, 0x5c, 0xd1, 0x86, 0xc8, 0x3a, 0xc4, 0x7d, 0x7a, 0x97, 0x70, 0xfb, 0xba, 0x95, 0xda, - 0x54, 0xaa, 0xc3, 0xb2, 0xc8, 0x37, 0xda, 0xd0, 0x6b, 0x0a, 0xb3, 0x9e, 0xf7, 0x07, 0x6d, 0xd6, - 0x47, 0xda, 0x86, 0x1e, 0xd6, 0xaf, 0xc2, 0xa4, 0xdf, 0x23, 0x70, 0x19, 0xf3, 0x52, 0x85, 0x88, - 0x36, 0x0f, 0x3d, 0xa5, 0x4c, 0x89, 0xe4, 0xf3, 0x58, 0x88, 0x91, 0x12, 0x6f, 0x34, 0x2d, 0xf5, - 0x90, 0x92, 0x85, 0x49, 0x9e, 0x39, 0x1e, 0x77, 0xf0, 0x47, 0x4e, 0x29, 0xdd, 0x65, 0xac, 0xfc, - 0x34, 0x0e, 0xa3, 0xfc, 0xd9, 0x8e, 0xbc, 0x0d, 0xc0, 0xff, 0xc2, 0x02, 0x7f, 0xae, 0xe3, 0x7f, - 0x72, 0xa4, 0xe6, 0x3a, 0xbf, 0xf5, 0x69, 0xf3, 0x3f, 0xfa, 0xcb, 0x3f, 0x7e, 0x3e, 0x7c, 0x56, - 0x9b, 0x5a, 0x3e, 0xbc, 0xbe, 0xbc, 0x6f, 0x17, 0xc5, 0x3f, 0x9e, 0xde, 0x52, 0xae, 0x92, 0x77, - 
0x00, 0xb8, 0x35, 0x51, 0xb9, 0x51, 0x0b, 0xb9, 0xe9, 0xed, 0x3d, 0x65, 0xbb, 0x60, 0xde, 0x30, - 0x32, 0xc1, 0xdf, 0x85, 0x89, 0x40, 0xf0, 0x36, 0xf5, 0xc4, 0x1e, 0x76, 0xf8, 0x07, 0x83, 0xae, - 0xfe, 0x5f, 0x44, 0xe1, 0x73, 0xda, 0x8c, 0x10, 0xee, 0x52, 0x4f, 0x92, 0x6f, 0x41, 0x52, 0x7e, - 0x61, 0x46, 0xf3, 0x2f, 0x74, 0x7e, 0x7b, 0xe6, 0x6a, 0x2e, 0xf6, 0x7a, 0x98, 0xd6, 0xd2, 0xa8, - 0x6c, 0x5e, 0x9b, 0xf5, 0x3d, 0x91, 0x1e, 0x99, 0x29, 0xd3, 0xf7, 0x1e, 0x24, 0xc4, 0xd9, 0xa3, - 0xaa, 0x60, 0xab, 0xfb, 0x0c, 0x88, 0x14, 0xca, 0x9f, 0xd5, 0xa6, 0x7d, 0xf9, 0x35, 0xce, 0xc7, - 0x44, 0xdf, 0x1b, 0x3c, 0x45, 0xcd, 0xa2, 0xb8, 0x29, 0x2d, 0xce, 0xc4, 0x61, 0xe5, 0x65, 0x82, - 0x4a, 0xcf, 0x96, 0xb6, 0x5e, 0x42, 0xa1, 0x0b, 0xda, 0x3c, 0x13, 0x5a, 0x64, 0x54, 0xd4, 0x58, - 0xe6, 0x3f, 0xff, 0x89, 0x46, 0x84, 0x29, 0xd9, 0x18, 0x3c, 0xb5, 0x5d, 0x40, 0xc1, 0xe7, 0x52, - 0xc9, 0xc0, 0xda, 0xe5, 0x1f, 0xb2, 0x2a, 0xf9, 0x81, 0x30, 0xfa, 0x59, 0xb2, 0x9e, 0x30, 0x3a, - 0x15, 0x31, 0xba, 0x8e, 0x34, 0x92, 0xd1, 0xef, 0x3e, 0x63, 0x66, 0x54, 0x51, 0x0b, 0xb9, 0xda, - 0xe6, 0x01, 0xb9, 0x3b, 0x50, 0xc6, 0x14, 0x72, 0x48, 0xbb, 0x1c, 0xe3, 0x39, 0x65, 0x52, 0x11, - 0x68, 0x84, 0xc8, 0xfb, 0xc1, 0x37, 0xe2, 0x35, 0x85, 0xdc, 0x82, 0xd1, 0xfb, 0xf8, 0xff, 0xda, - 0xa4, 0x8b, 0xa7, 0x29, 0x7e, 0x4f, 0x39, 0x51, 0x6e, 0x8f, 0x96, 0x0e, 0x82, 0x26, 0xf3, 0xdd, - 0x3f, 0x7c, 0xb6, 0xa0, 0x7c, 0xfa, 0xd9, 0x82, 0xf2, 0xf7, 0xcf, 0x16, 0x94, 0x8f, 0x3e, 0x5f, - 0x18, 0xfa, 0xf4, 0xf3, 0x85, 0xa1, 0xbf, 0x7e, 0xbe, 0x30, 0xf4, 0xed, 0x57, 0xca, 0xa6, 0xb7, - 0x57, 0x2f, 0x66, 0x4a, 0x76, 0x75, 0x59, 0x77, 0xaa, 0xba, 0xa1, 0xd7, 0x1c, 0x7b, 0x9f, 0x96, - 0x3c, 0xf1, 0xb5, 0x2c, 0xfe, 0x57, 0xfc, 0x93, 0xe1, 0xd9, 0x3b, 0x08, 0x6c, 0xf2, 0xe5, 0xcc, - 0x9a, 0x9d, 0xb9, 0x53, 0x33, 0x8b, 0xa3, 0x68, 0xc3, 0xeb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, - 0x15, 0xc2, 0xbe, 0xc7, 0x19, 0x2f, 0x00, 0x00, + // 3186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcf, 0x73, 0x1b, 0x49, + 0xf5, 0xf7, 0xf8, 0xb7, 0x9e, 0x2c, 0x47, 0xee, 0xd8, 0xce, 0x58, 0xc9, 0x5a, 0xce, 0x64, 0x37, + 0xeb, 0x64, 0xf3, 0x95, 0x37, 0xde, 0xef, 0x16, 0x49, 0x58, 0x08, 0x91, 0xac, 0x24, 0x76, 0x12, + 0xc7, 0x6b, 0xc7, 0xfb, 0x83, 0xa2, 0x10, 0x23, 0x4d, 0x5b, 0x1e, 0x5b, 0x9a, 0xd1, 0xce, 0x8c, + 0x92, 0x35, 0xd4, 0x5e, 0x28, 0x8a, 0x3d, 0x70, 0xd9, 0x82, 0x13, 0x05, 0x05, 0x1c, 0x38, 0x2d, + 0x67, 0x2e, 0x14, 0x7f, 0x00, 0x05, 0x97, 0xa5, 0xb8, 0xc0, 0x45, 0x45, 0xed, 0xf2, 0xa3, 0x4a, + 0x37, 0xee, 0x1c, 0xa8, 0x7e, 0xdd, 0x33, 0xd3, 0xa3, 0x5f, 0x96, 0x92, 0x78, 0x97, 0x03, 0x37, + 0xf5, 0xa7, 0xdf, 0xcf, 0xee, 0xd7, 0xaf, 0xdf, 0xeb, 0x11, 0xcc, 0xd6, 0x0e, 0xcb, 0x2b, 0x7a, + 0xcd, 0x5c, 0x71, 0xeb, 0xc5, 0xaa, 0xe9, 0x65, 0x6a, 0x8e, 0xed, 0xd9, 0x64, 0x44, 0xaf, 0x99, + 0xa9, 0xb3, 0x65, 0xdb, 0x2e, 0x57, 0xe8, 0x0a, 0x42, 0xc5, 0xfa, 0xde, 0x0a, 0xad, 0xd6, 0xbc, + 0x23, 0x4e, 0x91, 0x4a, 0xb7, 0x4e, 0x7a, 0x66, 0x95, 0xba, 0x9e, 0x5e, 0xad, 0x09, 0x02, 0xed, + 0xf0, 0x9a, 0x9b, 0x31, 0x6d, 0x94, 0x5d, 0xb2, 0x1d, 0xba, 0xf2, 0xf8, 0xea, 0x4a, 0x99, 0x5a, + 0xd4, 0xd1, 0x3d, 0x6a, 0x08, 0x9a, 0x65, 0x89, 0xc6, 0xa2, 0xde, 0x13, 0xdb, 0x39, 0x34, 0xad, + 0x72, 0x27, 0xca, 0x73, 0x42, 0x1d, 0xa3, 0xd4, 0x2d, 0xcb, 0xf6, 0x74, 0xcf, 0xb4, 0x2d, 0x57, + 0xcc, 0x06, 0x4e, 0xec, 0x53, 0xbd, 0xe2, 0xed, 0x73, 0x54, 0xfb, 0x29, 0xc0, 0xec, 0x86, 0x5d, + 0xdc, 0x41, 0xc7, 0xb6, 0xe9, 0x7b, 0x75, 0xea, 0x7a, 0xeb, 0x1e, 0xad, 0x92, 0x55, 0x98, 0xac, + 0x39, 0xa6, 0xed, 0x98, 
0xde, 0x91, 0xaa, 0x2c, 0x29, 0xcb, 0x4a, 0x76, 0xbe, 0xd9, 0x48, 0x13, + 0x1f, 0xbb, 0x62, 0x57, 0x4d, 0x0f, 0x7d, 0xdd, 0x0e, 0xe8, 0xc8, 0xeb, 0x10, 0xb3, 0xf4, 0x2a, + 0x75, 0x6b, 0x7a, 0x89, 0xaa, 0x23, 0x4b, 0xca, 0x72, 0x2c, 0x7b, 0xa6, 0xd9, 0x48, 0x9f, 0x0e, + 0x40, 0x89, 0x2b, 0xa4, 0x24, 0xaf, 0x41, 0xac, 0x54, 0x31, 0xa9, 0xe5, 0x15, 0x4c, 0x43, 0x9d, + 0x44, 0x36, 0xd4, 0xc5, 0xc1, 0x75, 0x43, 0xd6, 0xe5, 0x63, 0x64, 0x07, 0xc6, 0x2b, 0x7a, 0x91, + 0x56, 0x5c, 0x75, 0x74, 0x69, 0x64, 0x39, 0xbe, 0xfa, 0x52, 0x46, 0xaf, 0x99, 0x99, 0x4e, 0xae, + 0x64, 0xee, 0x23, 0x5d, 0xde, 0xf2, 0x9c, 0xa3, 0xec, 0x6c, 0xb3, 0x91, 0x4e, 0x72, 0x46, 0x49, + 0xac, 0x10, 0x45, 0xca, 0x10, 0x97, 0x16, 0x4e, 0x1d, 0x43, 0xc9, 0x97, 0xbb, 0x4b, 0xbe, 0x15, + 0x12, 0x73, 0xf1, 0x0b, 0xcd, 0x46, 0x7a, 0x4e, 0x12, 0x21, 0xe9, 0x90, 0x25, 0x93, 0x0f, 0x15, + 0x98, 0x75, 0xe8, 0x7b, 0x75, 0xd3, 0xa1, 0x46, 0xc1, 0xb2, 0x0d, 0x5a, 0x10, 0xce, 0x8c, 0xa3, + 0xca, 0xab, 0xdd, 0x55, 0x6e, 0x0b, 0xae, 0x4d, 0xdb, 0xa0, 0xb2, 0x63, 0x5a, 0xb3, 0x91, 0x3e, + 0xe7, 0xb4, 0x4d, 0x86, 0x06, 0xa8, 0xca, 0x36, 0x69, 0x9f, 0x27, 0x0f, 0x61, 0xb2, 0x66, 0x1b, + 0x05, 0xb7, 0x46, 0x4b, 0xea, 0xf0, 0x92, 0xb2, 0x1c, 0x5f, 0x3d, 0x9b, 0xe1, 0x11, 0x87, 0x36, + 0xb0, 0xa8, 0xcc, 0x3c, 0xbe, 0x9a, 0xd9, 0xb2, 0x8d, 0x9d, 0x1a, 0x2d, 0xe1, 0x7e, 0xce, 0xd4, + 0xf8, 0x20, 0x22, 0x7b, 0x42, 0x80, 0x64, 0x0b, 0x62, 0xbe, 0x40, 0x57, 0x9d, 0x40, 0x77, 0x7a, + 0x4a, 0xe4, 0x61, 0xc5, 0x07, 0x6e, 0x24, 0xac, 0x04, 0x46, 0x72, 0x30, 0x61, 0x5a, 0x65, 0x87, + 0xba, 0xae, 0x1a, 0x43, 0x79, 0x04, 0x05, 0xad, 0x73, 0x2c, 0x67, 0x5b, 0x7b, 0x66, 0x39, 0x3b, + 0xc7, 0x0c, 0x13, 0x64, 0x92, 0x14, 0x9f, 0x93, 0xdc, 0x86, 0x49, 0x97, 0x3a, 0x8f, 0xcd, 0x12, + 0x75, 0x55, 0x90, 0xa4, 0xec, 0x70, 0x50, 0x48, 0x41, 0x63, 0x7c, 0x3a, 0xd9, 0x18, 0x1f, 0x63, + 0x31, 0xee, 0x96, 0xf6, 0xa9, 0x51, 0xaf, 0x50, 0x47, 0x8d, 0x87, 0x31, 0x1e, 0x80, 0x72, 0x8c, + 0x07, 0x20, 0xa9, 0xc3, 0x1c, 0x7d, 0xbf, 0x46, 0x1d, 0xb3, 0x4a, 0x2d, 0x4f, 0xaf, 0x6c, 0x39, + 0x66, 0x89, 0xae, 0x5b, 0x7b, 0xb6, 0x9a, 0xc0, 0x35, 0x4f, 0xa1, 0x2d, 0xf9, 0x4e, 0x14, 0xd9, + 0x0b, 0xcd, 0x46, 0x3a, 0xdd, 0x91, 0x59, 0x52, 0xd5, 0x59, 0x7a, 0x4a, 0x87, 0xb8, 0x14, 0x24, + 0xe4, 0x02, 0x8c, 0x1c, 0x52, 0x7e, 0x9e, 0x63, 0xd9, 0x99, 0x66, 0x23, 0x9d, 0x38, 0xa4, 0xf2, + 0x51, 0x66, 0xb3, 0xe4, 0x12, 0x8c, 0x3d, 0xd6, 0x2b, 0x75, 0x8a, 0xe1, 0x10, 0xcb, 0x9e, 0x6e, + 0x36, 0xd2, 0xa7, 0x10, 0x90, 0x08, 0x39, 0xc5, 0x8d, 0xe1, 0x6b, 0x4a, 0x6a, 0x0f, 0x92, 0xad, + 0xc7, 0xe0, 0x44, 0xf4, 0x54, 0xe1, 0x4c, 0x97, 0xd8, 0x3f, 0x09, 0x75, 0x1b, 0xa3, 0x93, 0x53, + 0xc9, 0x84, 0xf6, 0xaf, 0x11, 0x48, 0x44, 0xe2, 0x8c, 0xdc, 0x80, 0x51, 0xef, 0xa8, 0x46, 0x51, + 0xd9, 0xf4, 0x6a, 0x52, 0x8e, 0xc4, 0x47, 0x47, 0x35, 0x8a, 0x09, 0x66, 0x9a, 0x51, 0x44, 0x4e, + 0x07, 0xf2, 0x30, 0x13, 0x6a, 0xb6, 0xe3, 0xb9, 0xea, 0xf0, 0xd2, 0xc8, 0x72, 0x82, 0x9b, 0x80, + 0x80, 0x6c, 0x02, 0x02, 0xe4, 0x5b, 0xd1, 0x4c, 0x34, 0x82, 0x11, 0x7b, 0xa1, 0x3d, 0xee, 0x9f, + 0x3e, 0x05, 0x5d, 0x87, 0xb8, 0x57, 0x71, 0x0b, 0xd4, 0xd2, 0x8b, 0x15, 0x6a, 0xa8, 0xa3, 0x4b, + 0xca, 0xf2, 0x64, 0x56, 0x6d, 0x36, 0xd2, 0xb3, 0x1e, 0x5b, 0x57, 0x44, 0x25, 0x5e, 0x08, 0x51, + 0x4c, 0xd8, 0xd4, 0xf1, 0x0a, 0x2c, 0x85, 0xab, 0x63, 0x52, 0xc2, 0xa6, 0x8e, 0xb7, 0xa9, 0x57, + 0x69, 0x24, 0x61, 0x0b, 0x8c, 0xdc, 0x84, 0x44, 0xdd, 0xa5, 0x85, 0x52, 0xa5, 0xee, 0x7a, 0xd4, + 0x59, 0xdf, 0x52, 0xc7, 0x51, 0x63, 0xaa, 0xd9, 0x48, 0xcf, 0xd7, 0x5d, 0x9a, 0xf3, 0x71, 0x89, + 0x79, 0x4a, 0xc6, 0x3f, 0xaf, 0x40, 0xd3, 0x3c, 
0x48, 0x44, 0x92, 0x02, 0xb9, 0xd6, 0x61, 0xcb, + 0x05, 0x05, 0x6e, 0x39, 0x69, 0xdf, 0xf2, 0x81, 0x37, 0x5c, 0xfb, 0x8b, 0x02, 0xc9, 0xd6, 0x84, + 0xcf, 0xf8, 0xdf, 0xab, 0xd3, 0x3a, 0x15, 0x0e, 0x22, 0x3f, 0x02, 0x32, 0x3f, 0x02, 0xe4, 0xff, + 0x01, 0x0e, 0xec, 0x62, 0xc1, 0xa5, 0x78, 0x8b, 0x0e, 0x87, 0x9b, 0x72, 0x60, 0x17, 0x77, 0x68, + 0xcb, 0x2d, 0xea, 0x63, 0xc4, 0x80, 0x19, 0xc6, 0xe5, 0x70, 0x7d, 0x05, 0x46, 0xe0, 0x07, 0xdb, + 0x42, 0xd7, 0x3b, 0x28, 0xfb, 0x42, 0xb3, 0x91, 0x5e, 0x38, 0xb0, 0x8b, 0x12, 0x26, 0x7b, 0x74, + 0xaa, 0x65, 0x4a, 0xfb, 0xa3, 0x02, 0x33, 0x1b, 0x76, 0x71, 0xcb, 0xa1, 0x8c, 0xe0, 0x73, 0x73, + 0xee, 0xff, 0x60, 0x82, 0x71, 0x99, 0x06, 0x77, 0x29, 0xc6, 0x2f, 0xff, 0x03, 0xbb, 0xb8, 0x6e, + 0x44, 0x2e, 0x7f, 0x8e, 0x90, 0x2b, 0x30, 0xee, 0x50, 0xdd, 0xb5, 0x2d, 0x3c, 0x0b, 0x82, 0x9a, + 0x23, 0x32, 0x35, 0x47, 0xb4, 0x7f, 0xf3, 0xfd, 0xca, 0xe9, 0x56, 0x89, 0x56, 0x7c, 0x97, 0x2e, + 0xc3, 0x38, 0xd7, 0x28, 0xfb, 0x84, 0xe2, 0x65, 0x9f, 0x10, 0x78, 0x4a, 0x9f, 0x82, 0x45, 0x1b, + 0x39, 0x76, 0xd1, 0x24, 0xf7, 0x47, 0x07, 0x72, 0x7f, 0xac, 0x0f, 0xf7, 0xff, 0xae, 0xc0, 0xe9, + 0x0d, 0x34, 0x2a, 0xba, 0x02, 0x51, 0xaf, 0x94, 0x41, 0xbd, 0x1a, 0x3e, 0xd6, 0xab, 0x9b, 0x30, + 0xbe, 0x67, 0x56, 0x3c, 0xea, 0xe0, 0x0a, 0xc4, 0x57, 0x67, 0x82, 0x30, 0xa5, 0xde, 0x6d, 0x9c, + 0xe0, 0x96, 0x73, 0x22, 0xd9, 0x72, 0x8e, 0x0c, 0xb8, 0xcd, 0xf7, 0x60, 0x4a, 0x96, 0x4d, 0xbe, + 0x0c, 0xe3, 0xae, 0xa7, 0x7b, 0xd4, 0x55, 0x95, 0xa5, 0x91, 0xe5, 0xe9, 0xd5, 0x44, 0xa0, 0x9e, + 0xa1, 0x5c, 0x18, 0x27, 0x90, 0x85, 0x71, 0x44, 0xfb, 0x71, 0x12, 0x46, 0x36, 0xec, 0x22, 0x59, + 0x82, 0xe1, 0x60, 0x71, 0x92, 0xcd, 0x46, 0x7a, 0xca, 0x94, 0x97, 0x65, 0xd8, 0x34, 0xa2, 0x25, + 0x71, 0xa2, 0xcf, 0x92, 0xf8, 0xc4, 0x23, 0x2a, 0x52, 0xdf, 0x4f, 0xf4, 0x5d, 0xdf, 0x67, 0x83, + 0x52, 0x9d, 0x97, 0x6f, 0xb3, 0xfe, 0x9a, 0x0d, 0x50, 0x99, 0xbf, 0x15, 0xbd, 0x0f, 0x21, 0x9a, + 0xa2, 0x9e, 0xfe, 0x16, 0x7c, 0xdc, 0xa5, 0x0e, 0x8f, 0xa3, 0x82, 0xa5, 0x40, 0xc1, 0xf3, 0x2e, + 0xbb, 0x2f, 0xc1, 0x98, 0xfd, 0xc4, 0xa2, 0x8e, 0xe8, 0x77, 0x70, 0xd5, 0x11, 0x90, 0x57, 0x1d, + 0x01, 0x42, 0xe1, 0x2c, 0x2e, 0x7f, 0x01, 0x87, 0xee, 0xbe, 0x59, 0x2b, 0xd4, 0x5d, 0xea, 0x14, + 0xca, 0x8e, 0x5d, 0xaf, 0xb9, 0xea, 0x29, 0x3c, 0xdb, 0x17, 0x9b, 0x8d, 0xb4, 0x86, 0x64, 0x0f, + 0x7d, 0xaa, 0x5d, 0x97, 0x3a, 0x77, 0x90, 0x46, 0x92, 0xa9, 0x76, 0xa3, 0x21, 0xdf, 0x53, 0xe0, + 0x62, 0xc9, 0xae, 0xd6, 0x58, 0x6d, 0x41, 0x8d, 0x42, 0x2f, 0x95, 0xa7, 0x97, 0x94, 0xe5, 0xa9, + 0xec, 0xab, 0xcd, 0x46, 0xfa, 0x4a, 0xc8, 0xf1, 0xe6, 0xf1, 0xca, 0xb5, 0xe3, 0xa9, 0x23, 0x7d, + 0xe7, 0x68, 0x9f, 0x7d, 0xa7, 0xdc, 0xc3, 0x8c, 0x3d, 0xf7, 0x1e, 0x66, 0xea, 0x79, 0xf4, 0x30, + 0xbf, 0x50, 0x60, 0x49, 0x74, 0x03, 0xa6, 0x55, 0x2e, 0x38, 0xd4, 0xb5, 0xeb, 0x4e, 0x89, 0x16, + 0x44, 0x68, 0xb0, 0xba, 0xdd, 0x55, 0xe7, 0xd0, 0xf6, 0xe5, 0x4e, 0x9a, 0xb6, 0x05, 0xc3, 0xb6, + 0x44, 0x9f, 0xbd, 0xd2, 0x6c, 0xa4, 0x97, 0x43, 0xa9, 0x9d, 0x68, 0x24, 0x63, 0x16, 0x7b, 0x53, + 0x92, 0x7b, 0x30, 0x51, 0x72, 0xa8, 0xee, 0x51, 0x03, 0x4b, 0x33, 0xd6, 0x94, 0xf0, 0x07, 0x85, + 0x8c, 0xff, 0x7e, 0x91, 0x79, 0xe4, 0xbf, 0x5f, 0xf0, 0x76, 0x4b, 0x90, 0xcb, 0xed, 0x96, 0x80, + 0xe4, 0x9e, 0x6d, 0xfa, 0xb9, 0xf4, 0x6c, 0xc9, 0x67, 0xe8, 0xd9, 0xbe, 0x01, 0xf1, 0xc3, 0x6b, + 0x6e, 0xc1, 0x37, 0x68, 0x06, 0x45, 0x9d, 0x97, 0x97, 0x39, 0x7c, 0x58, 0x61, 0x8b, 0x2d, 0xac, + 0xe4, 0xd5, 0xf0, 0xe1, 0x35, 0x77, 0xbd, 0xcd, 0x44, 0x08, 0x51, 0x96, 0x9a, 0x98, 0x74, 0xa1, + 0x4d, 0x25, 0xdd, 0xc3, 0x45, 0xd8, 0x1d, 0xc8, 0x15, 0xe3, 0x16, 0xb9, 
0x02, 0x8d, 0x76, 0x9a, + 0xb3, 0xcf, 0xde, 0x69, 0x2e, 0xfe, 0xaf, 0xd3, 0xfc, 0x2f, 0xed, 0x34, 0xe7, 0x93, 0x67, 0xb4, + 0x7b, 0x30, 0xd7, 0x71, 0x53, 0x58, 0x42, 0x2c, 0x9a, 0x06, 0x8e, 0xe5, 0x87, 0x38, 0x1f, 0x93, + 0x03, 0xde, 0xc7, 0xb4, 0x7f, 0x28, 0x30, 0xbf, 0xc1, 0x8a, 0x70, 0x91, 0x22, 0xcd, 0x6f, 0x53, + 0xbf, 0x40, 0x93, 0xaa, 0x42, 0xa5, 0x8f, 0xaa, 0xf0, 0xc4, 0x6b, 0x8a, 0x37, 0x60, 0xca, 0xa2, + 0x4f, 0x0a, 0x2d, 0x39, 0x1f, 0xaf, 0x6f, 0x8b, 0x3e, 0xd9, 0x6a, 0x4f, 0xfb, 0x71, 0x09, 0xd6, + 0x7e, 0x35, 0x0c, 0x67, 0xda, 0x1c, 0x75, 0x6b, 0xb6, 0xe5, 0x52, 0xf2, 0x13, 0x05, 0x54, 0x27, + 0x9c, 0xc0, 0xa8, 0x61, 0x89, 0xb7, 0x5e, 0xf1, 0xb8, 0xef, 0xf1, 0xd5, 0xeb, 0xfe, 0xfd, 0xde, + 0x49, 0x40, 0x66, 0xbb, 0x85, 0x79, 0x9b, 0xf3, 0xf2, 0x8b, 0xff, 0xa5, 0x66, 0x23, 0x7d, 0xde, + 0xe9, 0x4c, 0x21, 0x59, 0x7b, 0xa6, 0x0b, 0x49, 0xca, 0x81, 0x73, 0xbd, 0xe4, 0x9f, 0x48, 0x67, + 0x6b, 0xc1, 0x9c, 0xd4, 0xcf, 0x71, 0x2f, 0xf1, 0xb1, 0x77, 0x90, 0xbe, 0xe5, 0x12, 0x8c, 0x51, + 0xc7, 0xb1, 0x1d, 0x59, 0x27, 0x02, 0x32, 0x29, 0x02, 0xda, 0x07, 0xd8, 0xf6, 0x45, 0xf5, 0x91, + 0x7d, 0x20, 0xbc, 0xe5, 0xe4, 0x63, 0xd1, 0x73, 0xf2, 0xfd, 0x48, 0xb5, 0xf6, 0x9c, 0xa1, 0x8d, + 0xd9, 0xc5, 0x66, 0x23, 0x9d, 0xc2, 0xce, 0x32, 0x04, 0xe5, 0x95, 0x4e, 0xb6, 0xce, 0x69, 0x1f, + 0xc6, 0x61, 0x0c, 0xeb, 0x0c, 0x72, 0x11, 0x46, 0xf1, 0xad, 0x82, 0x7b, 0x87, 0xfd, 0xba, 0x15, + 0x7d, 0xa7, 0xc0, 0x79, 0x92, 0x87, 0x53, 0x7e, 0x20, 0x16, 0xf6, 0xf4, 0x92, 0x27, 0xbc, 0x54, + 0xb2, 0xe7, 0x9a, 0x8d, 0xb4, 0xea, 0x4f, 0xdd, 0xc6, 0x19, 0x89, 0x79, 0x3a, 0x3a, 0x43, 0xae, + 0x43, 0x1c, 0xcb, 0x25, 0x5e, 0x3d, 0x89, 0xe6, 0x13, 0x93, 0x3e, 0x83, 0x79, 0xd5, 0x23, 0x27, + 0xfd, 0x10, 0x65, 0xc7, 0x01, 0x8b, 0x2c, 0x9f, 0x97, 0x77, 0x6e, 0x78, 0x1c, 0x10, 0x6f, 0x63, + 0x8e, 0x4b, 0x30, 0x29, 0xc3, 0xa9, 0xa0, 0xb2, 0xa8, 0x98, 0x55, 0xd3, 0xf3, 0xdf, 0xb0, 0x17, + 0x71, 0x61, 0x71, 0x31, 0x82, 0x52, 0xe2, 0x3e, 0x12, 0xf0, 0x68, 0x66, 0x8b, 0xab, 0x3a, 0x91, + 0x89, 0x48, 0x65, 0x34, 0x1d, 0x9d, 0x23, 0xbf, 0x56, 0xe0, 0x62, 0x8b, 0xa6, 0x42, 0xf1, 0x28, + 0x38, 0xc5, 0x85, 0x52, 0x45, 0x77, 0x5d, 0xfe, 0x3e, 0x34, 0x21, 0xbd, 0x68, 0x77, 0x32, 0x20, + 0x7b, 0xe4, 0x9f, 0xe6, 0x1c, 0x63, 0xda, 0xd4, 0xab, 0x94, 0xdb, 0xb4, 0xd2, 0x6c, 0xa4, 0x5f, + 0x71, 0x8e, 0xa3, 0x95, 0x96, 0xe2, 0xfc, 0xb1, 0xc4, 0x64, 0x07, 0xe2, 0x35, 0xea, 0x54, 0x4d, + 0xd7, 0xc5, 0x36, 0x82, 0xbf, 0xb6, 0xcf, 0x4b, 0xb6, 0x6d, 0x85, 0xb3, 0x7c, 0xd5, 0x25, 0x72, + 0x79, 0xd5, 0x25, 0x98, 0x65, 0xe8, 0x92, 0xed, 0x18, 0xb6, 0x45, 0xf9, 0xe7, 0x8b, 0x49, 0xd1, + 0xab, 0x09, 0x2c, 0xd2, 0xab, 0x09, 0x8c, 0x3c, 0x80, 0x19, 0xde, 0x69, 0x14, 0x0c, 0x5a, 0x73, + 0x68, 0x09, 0xcb, 0xae, 0x18, 0x6e, 0xf6, 0x12, 0x0b, 0x74, 0x3e, 0xb9, 0x16, 0xcc, 0x45, 0x76, + 0x23, 0xd9, 0x3a, 0x4b, 0xd6, 0x82, 0x16, 0x0b, 0xda, 0x5c, 0xea, 0xbb, 0xc9, 0x4a, 0xfd, 0x53, + 0x81, 0xb8, 0xb4, 0x00, 0x64, 0x1b, 0x26, 0xdd, 0x7a, 0xf1, 0x80, 0x96, 0x82, 0x84, 0xb9, 0xd8, + 0x79, 0xa9, 0x32, 0x3b, 0x9c, 0x4c, 0xd4, 0x62, 0x82, 0x27, 0x52, 0x8b, 0x09, 0x0c, 0x53, 0x16, + 0x75, 0x8a, 0xfc, 0x49, 0xcc, 0x4f, 0x59, 0x0c, 0x88, 0xa4, 0x2c, 0x06, 0xa4, 0xde, 0x85, 0x09, + 0x21, 0x97, 0x1d, 0xe0, 0x43, 0xd3, 0x32, 0xe4, 0x03, 0xcc, 0xc6, 0xf2, 0x01, 0x66, 0xe3, 0xe0, + 0xa0, 0x0f, 0xf7, 0x3e, 0xe8, 0x29, 0x13, 0x4e, 0x77, 0x38, 0x06, 0x4f, 0x91, 0x74, 0x95, 0x63, + 0xab, 0x89, 0x9f, 0x29, 0x70, 0xb1, 0xbf, 0x88, 0xef, 0x4f, 0xfd, 0x3d, 0x59, 0xbd, 0xdf, 0xa2, + 0x46, 0x04, 0xb6, 0x68, 0x3b, 0xce, 0xc0, 0x93, 0xaf, 0xdc, 0xb4, 0x1f, 0x8e, 0xc1, 0xd9, 0x1e, + 
0x26, 0xb2, 0xee, 0x68, 0xa1, 0xaa, 0xbf, 0x6f, 0x56, 0xeb, 0xd5, 0xb0, 0x35, 0xda, 0x73, 0xf4, + 0x12, 0xbb, 0x16, 0x45, 0xe8, 0x7d, 0xe5, 0x38, 0x47, 0x33, 0x0f, 0xb8, 0x04, 0x1f, 0xbd, 0x2d, + 0xf8, 0xa5, 0xfb, 0xba, 0xda, 0x99, 0x42, 0xbe, 0xaf, 0xbb, 0x90, 0x90, 0xdf, 0x28, 0x70, 0xbe, + 0xab, 0x89, 0x98, 0xfb, 0x6c, 0xbb, 0x82, 0x41, 0x1d, 0x5f, 0xcd, 0x3d, 0xad, 0xa9, 0xd9, 0xa3, + 0x2d, 0xdb, 0xae, 0x70, 0x83, 0x5f, 0x69, 0x36, 0xd2, 0x2f, 0x57, 0x7b, 0xd1, 0x49, 0x66, 0xbf, + 0xd0, 0x93, 0x90, 0x15, 0x1b, 0xbd, 0x16, 0xe7, 0xa4, 0xe2, 0x5e, 0x3b, 0xde, 0xcd, 0xfe, 0x54, + 0x3f, 0x8c, 0xc6, 0xfc, 0x8b, 0xed, 0xeb, 0xcb, 0x04, 0x0e, 0x16, 0xf7, 0xda, 0x6f, 0x87, 0x21, + 0x7d, 0x8c, 0x0c, 0xf2, 0xcb, 0x3e, 0x02, 0xf3, 0x56, 0x3f, 0xd6, 0x9c, 0x68, 0x70, 0x7e, 0x11, + 0xfb, 0xab, 0xe5, 0x21, 0x86, 0xf7, 0xc0, 0x7d, 0xd3, 0xf5, 0xc8, 0x35, 0x18, 0xc7, 0x72, 0xde, + 0xbf, 0x27, 0x20, 0xbc, 0x27, 0xf8, 0x9d, 0xc3, 0x67, 0xe5, 0x3b, 0x87, 0x23, 0xda, 0x2e, 0x10, + 0xfe, 0x82, 0x5c, 0x91, 0x6a, 0x60, 0x72, 0x13, 0x12, 0x25, 0x8e, 0x52, 0x43, 0xea, 0x55, 0xf0, + 0x63, 0x51, 0x30, 0x11, 0xed, 0x58, 0xa6, 0x64, 0x5c, 0xbb, 0x0e, 0xa7, 0x50, 0xfb, 0x1d, 0x1a, + 0x7c, 0x6f, 0xe8, 0xb3, 0x08, 0xd4, 0xde, 0x00, 0x82, 0xac, 0x39, 0xbc, 0xab, 0x07, 0xe5, 0xfe, + 0x2a, 0xcc, 0x22, 0xf7, 0xae, 0x55, 0x7a, 0x2a, 0xfe, 0x9b, 0xa0, 0xee, 0x78, 0x0e, 0xd5, 0xab, + 0xa6, 0x55, 0x6e, 0xf5, 0xe0, 0x02, 0x8c, 0x58, 0xf5, 0x2a, 0x8a, 0x48, 0xf0, 0x6d, 0xb4, 0xea, + 0x55, 0x79, 0x1b, 0xad, 0x7a, 0x35, 0x30, 0x7f, 0x8d, 0x56, 0xa8, 0x47, 0x07, 0x55, 0xff, 0xb1, + 0x02, 0xc0, 0x1f, 0xbc, 0xb1, 0xf9, 0xec, 0xb7, 0x70, 0xbe, 0x0e, 0x71, 0xdc, 0x4f, 0xa3, 0x70, + 0x60, 0xe3, 0xdd, 0xae, 0x2c, 0x8f, 0xf1, 0x8a, 0x97, 0xc3, 0x1b, 0x76, 0xe4, 0x82, 0x87, 0x10, + 0x65, 0xac, 0x15, 0xaa, 0xbb, 0x3e, 0xeb, 0x48, 0xc8, 0xca, 0xe1, 0x56, 0xd6, 0x10, 0xd5, 0x9e, + 0xc0, 0x69, 0xbe, 0xd6, 0x35, 0x43, 0xf7, 0xc2, 0xc6, 0xef, 0x75, 0xf9, 0xc3, 0x52, 0x34, 0x16, + 0x7b, 0x75, 0xa2, 0x03, 0x34, 0x36, 0x75, 0x50, 0xb3, 0xba, 0x57, 0xda, 0xef, 0xa4, 0xfd, 0x5d, + 0x48, 0xec, 0xe9, 0x66, 0xc5, 0x7f, 0x42, 0xf5, 0x4f, 0x84, 0x1a, 0x5a, 0x11, 0x65, 0xe0, 0x41, + 0xcd, 0x59, 0xde, 0x6c, 0x3d, 0x25, 0x53, 0x32, 0x1e, 0xf8, 0x9b, 0xc3, 0x47, 0xb6, 0x2f, 0xca, + 0xdf, 0x16, 0xed, 0xc7, 0xfb, 0x1b, 0x65, 0x18, 0xc0, 0xdf, 0x38, 0xc4, 0xf2, 0x96, 0xf1, 0x40, + 0x77, 0x0e, 0xa9, 0xa3, 0x7d, 0xa4, 0xc0, 0x5c, 0xf4, 0x64, 0x3c, 0xa0, 0xae, 0xab, 0x97, 0x29, + 0xf9, 0xd2, 0x60, 0xfe, 0xdf, 0x1d, 0x0a, 0xbf, 0x67, 0x8c, 0x50, 0xcb, 0x10, 0x97, 0xca, 0x34, + 0x7f, 0x18, 0xf3, 0xf5, 0xf1, 0xf3, 0x45, 0xe5, 0x1a, 0xf3, 0xee, 0xd0, 0x36, 0xa3, 0xcf, 0x4e, + 0xc0, 0x18, 0x7d, 0x4c, 0x2d, 0x4f, 0xfb, 0xbe, 0x22, 0x36, 0xa4, 0xe5, 0xcb, 0x66, 0xbf, 0xa7, + 0xe6, 0x4e, 0xd8, 0x6e, 0xe2, 0xb5, 0x41, 0xfd, 0xaa, 0x18, 0x3f, 0xb0, 0xb6, 0x4c, 0xc9, 0x1f, + 0x58, 0x5b, 0xa6, 0xb4, 0x3f, 0x28, 0x7e, 0xce, 0x8a, 0x7c, 0x8c, 0xfb, 0xbc, 0xed, 0x20, 0x6b, + 0x10, 0x3b, 0x10, 0x9f, 0xc2, 0x78, 0xdb, 0xdb, 0xf6, 0x81, 0x0c, 0x5f, 0x30, 0x03, 0x1a, 0xf9, + 0x05, 0x33, 0x00, 0x2f, 0xa7, 0x20, 0x2e, 0xfd, 0xa3, 0x82, 0xc4, 0x61, 0x42, 0x0c, 0x93, 0x43, + 0x97, 0x2f, 0x41, 0x5c, 0xfa, 0xf4, 0x4e, 0xa6, 0x60, 0x72, 0xd3, 0x36, 0xe8, 0x96, 0xed, 0x78, + 0xc9, 0x21, 0x36, 0xba, 0x4b, 0x75, 0xa3, 0xc2, 0x48, 0x95, 0xcb, 0x3f, 0x57, 0x60, 0xd2, 0xd7, + 0x4b, 0x00, 0xc6, 0xdf, 0xdc, 0xcd, 0xef, 0xe6, 0xd7, 0x92, 0x43, 0x4c, 0xe0, 0x56, 0x7e, 0x73, + 0x6d, 0x7d, 0xf3, 0x4e, 0x52, 0x61, 0x83, 0xed, 0xdd, 0xcd, 0x4d, 0x36, 0x18, 0x26, 0x09, 0x88, + 0xed, 0xec, 0xe6, 0x72, 
0xf9, 0xfc, 0x5a, 0x7e, 0x2d, 0x39, 0xc2, 0x98, 0x6e, 0xdf, 0x5a, 0xbf, + 0x9f, 0x5f, 0x4b, 0x8e, 0x32, 0xba, 0xdd, 0xcd, 0x7b, 0x9b, 0x0f, 0xdf, 0xde, 0x4c, 0x8e, 0x71, + 0xba, 0xec, 0x83, 0xf5, 0x47, 0x8f, 0xf2, 0x6b, 0xc9, 0x71, 0x46, 0x77, 0x3f, 0x7f, 0x6b, 0x27, + 0xbf, 0x96, 0x9c, 0x60, 0x53, 0x5b, 0xdb, 0xf9, 0xfc, 0x83, 0x2d, 0x36, 0x35, 0xc9, 0x86, 0xb9, + 0x5b, 0x9b, 0xb9, 0xfc, 0x7d, 0x26, 0x25, 0xc6, 0x2c, 0xdc, 0xce, 0x6f, 0xe4, 0x73, 0x6c, 0x12, + 0x56, 0x7f, 0x3f, 0x06, 0x53, 0xb8, 0x6d, 0xfe, 0x93, 0xef, 0x6b, 0x10, 0xe7, 0x87, 0x85, 0x3f, + 0x5b, 0x48, 0x91, 0x9c, 0x9a, 0x6f, 0x7b, 0x8c, 0xcf, 0xb3, 0x75, 0xd3, 0x86, 0xc8, 0x4d, 0x98, + 0x92, 0x98, 0x5c, 0x32, 0x1d, 0x72, 0xb1, 0xbb, 0x39, 0xf5, 0x02, 0x8e, 0xbb, 0x9d, 0x5f, 0x6d, + 0x88, 0x69, 0xe5, 0x29, 0x69, 0x40, 0xad, 0x12, 0xd3, 0xf1, 0x5a, 0xa3, 0x49, 0x4f, 0x1b, 0x22, + 0x5f, 0x83, 0x38, 0xbf, 0xa2, 0xb8, 0xd6, 0x33, 0x21, 0x7f, 0xe4, 0xe6, 0xea, 0x61, 0x42, 0x06, + 0x26, 0xef, 0x50, 0x8f, 0xb3, 0xcf, 0x86, 0xec, 0xe1, 0x85, 0x99, 0x92, 0x5c, 0xd1, 0x86, 0xc8, + 0x06, 0xc4, 0x7c, 0x7a, 0x97, 0x70, 0xfb, 0xba, 0x5d, 0xb5, 0xa9, 0x54, 0x87, 0x69, 0x91, 0x6f, + 0xb4, 0xa1, 0x57, 0x15, 0x66, 0x3d, 0xaf, 0x0f, 0xda, 0xac, 0x8f, 0x94, 0x0d, 0x3d, 0xac, 0x5f, + 0x83, 0x84, 0x5f, 0x23, 0x70, 0x19, 0x0b, 0xd2, 0x0d, 0x11, 0x2d, 0x1e, 0x7a, 0x4a, 0x99, 0x16, + 0xc9, 0xe7, 0xa1, 0x10, 0x23, 0x25, 0xde, 0x68, 0x5a, 0xea, 0x21, 0x25, 0x0b, 0x09, 0x9e, 0x39, + 0x1e, 0x76, 0xf0, 0x47, 0x4e, 0x29, 0xdd, 0x65, 0xac, 0xfe, 0x20, 0x06, 0xe3, 0xfc, 0xd9, 0x8e, + 0xbc, 0x05, 0xc0, 0x7f, 0xe1, 0x05, 0x3f, 0xd7, 0xf1, 0x8f, 0x24, 0xa9, 0xf9, 0xce, 0x6f, 0x7d, + 0xda, 0xc2, 0x77, 0xff, 0xf4, 0xb7, 0x1f, 0x0d, 0x9f, 0xd6, 0xa6, 0x57, 0x1e, 0x5f, 0x5d, 0x39, + 0xb0, 0x8b, 0xe2, 0x4f, 0xb7, 0x37, 0x94, 0xcb, 0xe4, 0x6d, 0x00, 0x6e, 0x4d, 0x54, 0x6e, 0xd4, + 0x42, 0x6e, 0x7a, 0x7b, 0x4d, 0xd9, 0x2e, 0x98, 0x17, 0x8c, 0x4c, 0xf0, 0x37, 0x61, 0x2a, 0x10, + 0xbc, 0x43, 0x3d, 0xb1, 0x86, 0x1d, 0xfe, 0xdf, 0xd0, 0xd5, 0xff, 0x73, 0x28, 0x7c, 0x5e, 0x9b, + 0x11, 0xc2, 0x5d, 0xea, 0x49, 0xf2, 0x2d, 0x48, 0xca, 0x2f, 0xcc, 0x68, 0xfe, 0xd9, 0xce, 0x6f, + 0xcf, 0x5c, 0xcd, 0xb9, 0x5e, 0x0f, 0xd3, 0x5a, 0x1a, 0x95, 0x2d, 0x68, 0xb3, 0xbe, 0x27, 0xd2, + 0x23, 0x33, 0x65, 0xfa, 0xde, 0x85, 0xb8, 0xd8, 0x7b, 0x54, 0x15, 0x2c, 0x75, 0x9f, 0x01, 0x91, + 0x42, 0xf9, 0xb3, 0xda, 0x29, 0x5f, 0x7e, 0x8d, 0xf3, 0x31, 0xd1, 0x77, 0x06, 0x4f, 0x51, 0xb3, + 0x28, 0x6e, 0x5a, 0x8b, 0x31, 0x71, 0x78, 0xf3, 0x32, 0x41, 0xa5, 0x67, 0x4b, 0x5b, 0x2f, 0xa2, + 0xd0, 0x45, 0x6d, 0x81, 0x09, 0x2d, 0x32, 0x2a, 0x6a, 0xac, 0xf0, 0xaf, 0x8f, 0xa2, 0x10, 0x61, + 0x4a, 0x36, 0x07, 0x4f, 0x6d, 0x67, 0x51, 0xf0, 0x5c, 0x2a, 0x19, 0x58, 0xbb, 0xf2, 0x1d, 0x76, + 0x4b, 0x7e, 0x20, 0x8c, 0x7e, 0x96, 0xac, 0x27, 0x8c, 0x4e, 0x45, 0x8c, 0xae, 0x23, 0x8d, 0x64, + 0xf4, 0x3b, 0xcf, 0x98, 0x19, 0x55, 0xd4, 0x42, 0x2e, 0xb7, 0x79, 0x40, 0x6e, 0x0f, 0x94, 0x31, + 0x85, 0x1c, 0xd2, 0x2e, 0xc7, 0x78, 0x4e, 0x99, 0x54, 0x04, 0x1a, 0x21, 0xf2, 0x7a, 0xf0, 0x85, + 0x78, 0x55, 0x21, 0x37, 0x60, 0xfc, 0x2e, 0xfe, 0x57, 0x9d, 0x74, 0xf1, 0x34, 0xc5, 0xcf, 0x29, + 0x27, 0xca, 0xed, 0xd3, 0xd2, 0x61, 0x50, 0x64, 0xbe, 0xf3, 0xbb, 0x4f, 0x17, 0x95, 0x4f, 0x3e, + 0x5d, 0x54, 0xfe, 0xfa, 0xe9, 0xa2, 0xf2, 0xd1, 0x67, 0x8b, 0x43, 0x9f, 0x7c, 0xb6, 0x38, 0xf4, + 0xe7, 0xcf, 0x16, 0x87, 0xbe, 0xfe, 0x72, 0xd9, 0xf4, 0xf6, 0xeb, 0xc5, 0x4c, 0xc9, 0xae, 0xae, + 0xe8, 0x4e, 0x55, 0x37, 0xf4, 0x9a, 0x63, 0x1f, 0xd0, 0x92, 0x27, 0x46, 0x2b, 0xe2, 0x7f, 0xf2, + 0x1f, 0x0f, 0xcf, 0xde, 0x42, 0x60, 0x8b, 0x4f, 
0x67, 0xd6, 0xed, 0xcc, 0xad, 0x9a, 0x59, 0x1c, + 0x47, 0x1b, 0x5e, 0xfb, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x33, 0xb3, 0x1f, 0x9e, 0x15, 0x30, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -3570,6 +3625,18 @@ func (m *JobSubmitRequestItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExperimentalPriceInfo != nil { + { + size, err := m.ExperimentalPriceInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSubmit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } if len(m.Scheduler) > 0 { i -= len(m.Scheduler) copy(dAtA[i:], m.Scheduler) @@ -3778,20 +3845,20 @@ func (m *IngressConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } if len(m.Ports) > 0 { - dAtA3 := make([]byte, len(m.Ports)*10) - var j2 int + dAtA4 := make([]byte, len(m.Ports)*10) + var j3 int for _, num := range m.Ports { for num >= 1<<7 { - dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j2++ + j3++ } - dAtA3[j2] = uint8(num) - j2++ + dAtA4[j3] = uint8(num) + j3++ } - i -= j2 - copy(dAtA[i:], dAtA3[:j2]) - i = encodeVarintSubmit(dAtA, i, uint64(j2)) + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintSubmit(dAtA, i, uint64(j3)) i-- dAtA[i] = 0x12 } @@ -3824,20 +3891,20 @@ func (m *ServiceConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.Ports) > 0 { - dAtA5 := make([]byte, len(m.Ports)*10) - var j4 int + dAtA6 := make([]byte, len(m.Ports)*10) + var j5 int for _, num := range m.Ports { for num >= 1<<7 { - dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + dAtA6[j5] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j4++ + j5++ } - dAtA5[j4] = uint8(num) - j4++ + dAtA6[j5] = uint8(num) + j5++ } - i -= j4 - copy(dAtA[i:], dAtA5[:j4]) - i = encodeVarintSubmit(dAtA, i, uint64(j4)) + i -= j5 + copy(dAtA[i:], dAtA6[:j5]) + i = encodeVarintSubmit(dAtA, i, uint64(j5)) i-- dAtA[i] = 0x12 } @@ -4090,20 +4157,20 @@ func (m *JobSetFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.States) > 0 { - dAtA8 := make([]byte, len(m.States)*10) - var j7 int + dAtA9 := make([]byte, len(m.States)*10) + var j8 int for _, num := range m.States { for num >= 1<<7 { - dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80) + dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j7++ + j8++ } - dAtA8[j7] = uint8(num) - j7++ + dAtA9[j8] = uint8(num) + j8++ } - i -= j7 - copy(dAtA[i:], dAtA8[:j7]) - i = encodeVarintSubmit(dAtA, i, uint64(j7)) + i -= j8 + copy(dAtA[i:], dAtA9[:j8]) + i = encodeVarintSubmit(dAtA, i, uint64(j8)) i-- dAtA[i] = 0xa } @@ -4130,12 +4197,19 @@ func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.QueueTtlSeconds != 0 { - i = encodeVarintSubmit(dAtA, i, uint64(m.QueueTtlSeconds)) + if m.ExperimentalPriceInfo != nil { + { + size, err := m.ExperimentalPriceInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSubmit(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0xb0 + dAtA[i] = 0xf2 } if m.SchedulingResourceRequirements != nil { { @@ -4386,6 +4460,35 @@ func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ExperimentalPriceInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ExperimentalPriceInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExperimentalPriceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BidPrice != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.BidPrice)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + func (m *JobReprioritizeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5467,20 +5570,20 @@ func (m *QueueCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.JobStates) > 0 { - dAtA19 := make([]byte, len(m.JobStates)*10) - var j18 int + dAtA21 := make([]byte, len(m.JobStates)*10) + var j20 int for _, num := range m.JobStates { for num >= 1<<7 { - dAtA19[j18] = uint8(uint64(num)&0x7f | 0x80) + dAtA21[j20] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j18++ + j20++ } - dAtA19[j18] = uint8(num) - j18++ + dAtA21[j20] = uint8(num) + j20++ } - i -= j18 - copy(dAtA[i:], dAtA19[:j18]) - i = encodeVarintSubmit(dAtA, i, uint64(j18)) + i -= j20 + copy(dAtA[i:], dAtA21[:j20]) + i = encodeVarintSubmit(dAtA, i, uint64(j20)) i-- dAtA[i] = 0x1a } @@ -5581,6 +5684,10 @@ func (m *JobSubmitRequestItem) Size() (n int) { if l > 0 { n += 1 + l + sovSubmit(uint64(l)) } + if m.ExperimentalPriceInfo != nil { + l = m.ExperimentalPriceInfo.Size() + n += 1 + l + sovSubmit(uint64(l)) + } return n } @@ -5875,8 +5982,21 @@ func (m *Job) Size() (n int) { l = m.SchedulingResourceRequirements.Size() n += 2 + l + sovSubmit(uint64(l)) } - if m.QueueTtlSeconds != 0 { - n += 2 + sovSubmit(uint64(m.QueueTtlSeconds)) + if m.ExperimentalPriceInfo != nil { + l = m.ExperimentalPriceInfo.Size() + n += 2 + l + sovSubmit(uint64(l)) + } + return n +} + +func (m *ExperimentalPriceInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BidPrice != 0 { + n += 9 } return n } @@ -7041,6 +7161,42 @@ func (m *JobSubmitRequestItem) Unmarshal(dAtA []byte) error { } m.Scheduler = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExperimentalPriceInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSubmit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSubmit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExperimentalPriceInfo == nil { + m.ExperimentalPriceInfo = &ExperimentalPriceInfo{} + } + if err := m.ExperimentalPriceInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipSubmit(dAtA[iNdEx:]) @@ -9377,11 +9533,11 @@ func (m *Job) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QueueTtlSeconds", wireType) + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExperimentalPriceInfo", wireType) } - m.QueueTtlSeconds = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowSubmit @@ -9391,11 +9547,89 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - m.QueueTtlSeconds |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthSubmit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSubmit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExperimentalPriceInfo == nil { + m.ExperimentalPriceInfo = &ExperimentalPriceInfo{} + } + if err := m.ExperimentalPriceInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSubmit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSubmit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExperimentalPriceInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExperimentalPriceInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExperimentalPriceInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field BidPrice", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BidPrice = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skipSubmit(dAtA[iNdEx:]) diff --git a/pkg/api/submit.proto b/pkg/api/submit.proto index 46a98013ed2..0e2f92eb1d0 100644 --- a/pkg/api/submit.proto +++ b/pkg/api/submit.proto @@ -12,6 +12,7 @@ import "google/api/annotations.proto"; import "pkg/api/health.proto"; message JobSubmitRequestItem { + reserved 12; double priority = 1; string namespace = 3; string client_id = 8; @@ -25,7 +26,7 @@ message JobSubmitRequestItem { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. string scheduler = 11; - // Ordinal 12 was previously used for queue_ttl_seconds + ExperimentalPriceInfo experimentalPriceInfo = 13; } message IngressConfig { @@ -105,6 +106,7 @@ enum JobState { } message Job { + reserved 22; string id = 1; string client_id = 13; string job_set_id = 2; @@ -144,8 +146,11 @@ message Job { // Indicates which scheduler should manage this job. // If empty, the default scheduler is used. string scheduler = 20; - // Queuing TTL for this job in seconds. If this job queues for more than this duration it will be cancelled. Zero indicates an infinite lifetime. 
- int64 queue_ttl_seconds = 22; + ExperimentalPriceInfo experimentalPriceInfo = 30; +} + +message ExperimentalPriceInfo { + double bidPrice = 1; } // swagger:model diff --git a/pkg/armadaevents/events.pb.go b/pkg/armadaevents/events.pb.go index 9d065f22358..9a306a32f64 100644 --- a/pkg/armadaevents/events.pb.go +++ b/pkg/armadaevents/events.pb.go @@ -4,6 +4,7 @@ package armadaevents import ( + encoding_binary "encoding/binary" fmt "fmt" io "io" math "math" @@ -655,6 +656,8 @@ type SubmitJob struct { IsDuplicate bool `protobuf:"varint,12,opt,name=isDuplicate,proto3" json:"isDuplicate,omitempty"` // The job id JobId string `protobuf:"bytes,14,opt,name=job_id,json=jobId,proto3" json:"jobId,omitempty"` + // Pricing Information for the job. Currently experimental + ExperimentalPriceInfo *ExperimentalPriceInfo `protobuf:"bytes,15,opt,name=experimentalPriceInfo,proto3" json:"experimentalPriceInfo,omitempty"` } func (m *SubmitJob) Reset() { *m = SubmitJob{} } @@ -774,6 +777,57 @@ func (m *SubmitJob) GetJobId() string { return "" } +func (m *SubmitJob) GetExperimentalPriceInfo() *ExperimentalPriceInfo { + if m != nil { + return m.ExperimentalPriceInfo + } + return nil +} + +type ExperimentalPriceInfo struct { + BidPrice float64 `protobuf:"fixed64,1,opt,name=bidPrice,proto3" json:"bidPrice,omitempty"` +} + +func (m *ExperimentalPriceInfo) Reset() { *m = ExperimentalPriceInfo{} } +func (m *ExperimentalPriceInfo) String() string { return proto.CompactTextString(m) } +func (*ExperimentalPriceInfo) ProtoMessage() {} +func (*ExperimentalPriceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_6aab92ca59e015f8, []int{3} +} +func (m *ExperimentalPriceInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExperimentalPriceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExperimentalPriceInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExperimentalPriceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExperimentalPriceInfo.Merge(m, src) +} +func (m *ExperimentalPriceInfo) XXX_Size() int { + return m.Size() +} +func (m *ExperimentalPriceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExperimentalPriceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ExperimentalPriceInfo proto.InternalMessageInfo + +func (m *ExperimentalPriceInfo) GetBidPrice() float64 { + if m != nil { + return m.BidPrice + } + return 0 +} + // Kubernetes objects that can serve as main objects for an Armada job. 
type KubernetesMainObject struct { ObjectMeta *ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` @@ -786,7 +840,7 @@ func (m *KubernetesMainObject) Reset() { *m = KubernetesMainObject{} } func (m *KubernetesMainObject) String() string { return proto.CompactTextString(m) } func (*KubernetesMainObject) ProtoMessage() {} func (*KubernetesMainObject) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{3} + return fileDescriptor_6aab92ca59e015f8, []int{4} } func (m *KubernetesMainObject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -870,7 +924,7 @@ func (m *KubernetesObject) Reset() { *m = KubernetesObject{} } func (m *KubernetesObject) String() string { return proto.CompactTextString(m) } func (*KubernetesObject) ProtoMessage() {} func (*KubernetesObject) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{4} + return fileDescriptor_6aab92ca59e015f8, []int{5} } func (m *KubernetesObject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -997,7 +1051,7 @@ func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (m *ObjectMeta) String() string { return proto.CompactTextString(m) } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{5} + return fileDescriptor_6aab92ca59e015f8, []int{6} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1080,7 +1134,7 @@ func (m *PodSpecWithAvoidList) Reset() { *m = PodSpecWithAvoidList{} } func (m *PodSpecWithAvoidList) String() string { return proto.CompactTextString(m) } func (*PodSpecWithAvoidList) ProtoMessage() {} func (*PodSpecWithAvoidList) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{6} + return fileDescriptor_6aab92ca59e015f8, []int{7} } func (m *PodSpecWithAvoidList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1133,7 +1187,7 @@ func (m *ReprioritiseJob) Reset() { *m = ReprioritiseJob{} } func (m *ReprioritiseJob) String() string { return proto.CompactTextString(m) } func (*ReprioritiseJob) ProtoMessage() {} func (*ReprioritiseJob) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{7} + return fileDescriptor_6aab92ca59e015f8, []int{8} } func (m *ReprioritiseJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1187,7 +1241,7 @@ func (m *JobRequeued) Reset() { *m = JobRequeued{} } func (m *JobRequeued) String() string { return proto.CompactTextString(m) } func (*JobRequeued) ProtoMessage() {} func (*JobRequeued) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{8} + return fileDescriptor_6aab92ca59e015f8, []int{9} } func (m *JobRequeued) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1247,7 +1301,7 @@ func (m *ReprioritiseJobSet) Reset() { *m = ReprioritiseJobSet{} } func (m *ReprioritiseJobSet) String() string { return proto.CompactTextString(m) } func (*ReprioritiseJobSet) ProtoMessage() {} func (*ReprioritiseJobSet) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{9} + return fileDescriptor_6aab92ca59e015f8, []int{10} } func (m *ReprioritiseJobSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1294,7 +1348,7 @@ func (m *ReprioritisedJob) Reset() { *m = ReprioritisedJob{} } func (m *ReprioritisedJob) String() string { return proto.CompactTextString(m) } func (*ReprioritisedJob) ProtoMessage() {} func (*ReprioritisedJob) Descriptor() ([]byte, []int) { - return 
fileDescriptor_6aab92ca59e015f8, []int{10} + return fileDescriptor_6aab92ca59e015f8, []int{11} } func (m *ReprioritisedJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1349,7 +1403,7 @@ func (m *CancelJob) Reset() { *m = CancelJob{} } func (m *CancelJob) String() string { return proto.CompactTextString(m) } func (*CancelJob) ProtoMessage() {} func (*CancelJob) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{11} + return fileDescriptor_6aab92ca59e015f8, []int{12} } func (m *CancelJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1402,7 +1456,7 @@ func (m *JobSetFilter) Reset() { *m = JobSetFilter{} } func (m *JobSetFilter) String() string { return proto.CompactTextString(m) } func (*JobSetFilter) ProtoMessage() {} func (*JobSetFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{12} + return fileDescriptor_6aab92ca59e015f8, []int{13} } func (m *JobSetFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1449,7 +1503,7 @@ func (m *CancelJobSet) Reset() { *m = CancelJobSet{} } func (m *CancelJobSet) String() string { return proto.CompactTextString(m) } func (*CancelJobSet) ProtoMessage() {} func (*CancelJobSet) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{13} + return fileDescriptor_6aab92ca59e015f8, []int{14} } func (m *CancelJobSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1503,7 +1557,7 @@ func (m *CancelledJob) Reset() { *m = CancelledJob{} } func (m *CancelledJob) String() string { return proto.CompactTextString(m) } func (*CancelledJob) ProtoMessage() {} func (*CancelledJob) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{14} + return fileDescriptor_6aab92ca59e015f8, []int{15} } func (m *CancelledJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1558,7 +1612,7 @@ func (m *JobSucceeded) Reset() { *m = JobSucceeded{} } func (m *JobSucceeded) String() string { return proto.CompactTextString(m) } func (*JobSucceeded) ProtoMessage() {} func (*JobSucceeded) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{15} + return fileDescriptor_6aab92ca59e015f8, []int{16} } func (m *JobSucceeded) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1634,7 +1688,7 @@ func (m *JobRunLeased) Reset() { *m = JobRunLeased{} } func (m *JobRunLeased) String() string { return proto.CompactTextString(m) } func (*JobRunLeased) ProtoMessage() {} func (*JobRunLeased) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{16} + return fileDescriptor_6aab92ca59e015f8, []int{17} } func (m *JobRunLeased) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1740,7 +1794,7 @@ func (m *JobRunAssigned) Reset() { *m = JobRunAssigned{} } func (m *JobRunAssigned) String() string { return proto.CompactTextString(m) } func (*JobRunAssigned) ProtoMessage() {} func (*JobRunAssigned) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{17} + return fileDescriptor_6aab92ca59e015f8, []int{18} } func (m *JobRunAssigned) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1803,7 +1857,7 @@ func (m *JobRunRunning) Reset() { *m = JobRunRunning{} } func (m *JobRunRunning) String() string { return proto.CompactTextString(m) } func (*JobRunRunning) ProtoMessage() {} func (*JobRunRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{18} + return fileDescriptor_6aab92ca59e015f8, []int{19} } func (m *JobRunRunning) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1866,7 +1920,7 @@ func (m *KubernetesResourceInfo) Reset() { *m = KubernetesResourceInfo{} func (m *KubernetesResourceInfo) String() string { return proto.CompactTextString(m) } func (*KubernetesResourceInfo) ProtoMessage() {} func (*KubernetesResourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{19} + return fileDescriptor_6aab92ca59e015f8, []int{20} } func (m *KubernetesResourceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1957,7 +2011,7 @@ func (m *PodInfo) Reset() { *m = PodInfo{} } func (m *PodInfo) String() string { return proto.CompactTextString(m) } func (*PodInfo) ProtoMessage() {} func (*PodInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{20} + return fileDescriptor_6aab92ca59e015f8, []int{21} } func (m *PodInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2066,7 @@ func (m *IngressInfo) Reset() { *m = IngressInfo{} } func (m *IngressInfo) String() string { return proto.CompactTextString(m) } func (*IngressInfo) ProtoMessage() {} func (*IngressInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{21} + return fileDescriptor_6aab92ca59e015f8, []int{22} } func (m *IngressInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2075,7 +2129,7 @@ func (m *StandaloneIngressInfo) Reset() { *m = StandaloneIngressInfo{} } func (m *StandaloneIngressInfo) String() string { return proto.CompactTextString(m) } func (*StandaloneIngressInfo) ProtoMessage() {} func (*StandaloneIngressInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{22} + return fileDescriptor_6aab92ca59e015f8, []int{23} } func (m *StandaloneIngressInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2174,7 +2228,7 @@ func (m *JobRunSucceeded) Reset() { *m = JobRunSucceeded{} } func (m *JobRunSucceeded) String() string { return proto.CompactTextString(m) } func (*JobRunSucceeded) ProtoMessage() {} func (*JobRunSucceeded) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{23} + return fileDescriptor_6aab92ca59e015f8, []int{24} } func (m *JobRunSucceeded) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2237,7 +2291,7 @@ func (m *JobErrors) Reset() { *m = JobErrors{} } func (m *JobErrors) String() string { return proto.CompactTextString(m) } func (*JobErrors) ProtoMessage() {} func (*JobErrors) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{24} + return fileDescriptor_6aab92ca59e015f8, []int{25} } func (m *JobErrors) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2298,7 +2352,7 @@ func (m *JobRunErrors) Reset() { *m = JobRunErrors{} } func (m *JobRunErrors) String() string { return proto.CompactTextString(m) } func (*JobRunErrors) ProtoMessage() {} func (*JobRunErrors) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{25} + return fileDescriptor_6aab92ca59e015f8, []int{26} } func (m *JobRunErrors) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2380,7 +2434,7 @@ func (m *Error) Reset() { *m = Error{} } func (m *Error) String() string { return proto.CompactTextString(m) } func (*Error) ProtoMessage() {} func (*Error) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{26} + return fileDescriptor_6aab92ca59e015f8, []int{27} } func (m *Error) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2579,7 +2633,7 @@ func (m 
*KubernetesError) Reset() { *m = KubernetesError{} } func (m *KubernetesError) String() string { return proto.CompactTextString(m) } func (*KubernetesError) ProtoMessage() {} func (*KubernetesError) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{27} + return fileDescriptor_6aab92ca59e015f8, []int{28} } func (m *KubernetesError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2631,7 +2685,7 @@ func (m *PodError) Reset() { *m = PodError{} } func (m *PodError) String() string { return proto.CompactTextString(m) } func (*PodError) ProtoMessage() {} func (*PodError) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{28} + return fileDescriptor_6aab92ca59e015f8, []int{29} } func (m *PodError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2726,7 +2780,7 @@ func (m *ContainerError) Reset() { *m = ContainerError{} } func (m *ContainerError) String() string { return proto.CompactTextString(m) } func (*ContainerError) ProtoMessage() {} func (*ContainerError) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{29} + return fileDescriptor_6aab92ca59e015f8, []int{30} } func (m *ContainerError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2803,7 +2857,7 @@ func (m *PodLeaseReturned) Reset() { *m = PodLeaseReturned{} } func (m *PodLeaseReturned) String() string { return proto.CompactTextString(m) } func (*PodLeaseReturned) ProtoMessage() {} func (*PodLeaseReturned) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{30} + return fileDescriptor_6aab92ca59e015f8, []int{31} } func (m *PodLeaseReturned) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2881,7 +2935,7 @@ func (m *PodTerminated) Reset() { *m = PodTerminated{} } func (m *PodTerminated) String() string { return proto.CompactTextString(m) } func (*PodTerminated) ProtoMessage() {} func (*PodTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{31} + return fileDescriptor_6aab92ca59e015f8, []int{32} } func (m *PodTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2945,7 +2999,7 @@ func (m *ExecutorError) Reset() { *m = ExecutorError{} } func (m *ExecutorError) String() string { return proto.CompactTextString(m) } func (*ExecutorError) ProtoMessage() {} func (*ExecutorError) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{32} + return fileDescriptor_6aab92ca59e015f8, []int{33} } func (m *ExecutorError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2986,7 +3040,7 @@ func (m *PodUnschedulable) Reset() { *m = PodUnschedulable{} } func (m *PodUnschedulable) String() string { return proto.CompactTextString(m) } func (*PodUnschedulable) ProtoMessage() {} func (*PodUnschedulable) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{33} + return fileDescriptor_6aab92ca59e015f8, []int{34} } func (m *PodUnschedulable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3050,7 +3104,7 @@ func (m *LeaseExpired) Reset() { *m = LeaseExpired{} } func (m *LeaseExpired) String() string { return proto.CompactTextString(m) } func (*LeaseExpired) ProtoMessage() {} func (*LeaseExpired) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{34} + return fileDescriptor_6aab92ca59e015f8, []int{35} } func (m *LeaseExpired) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3087,7 +3141,7 @@ func (m *MaxRunsExceeded) Reset() { *m = MaxRunsExceeded{} } func (m 
*MaxRunsExceeded) String() string { return proto.CompactTextString(m) } func (*MaxRunsExceeded) ProtoMessage() {} func (*MaxRunsExceeded) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{35} + return fileDescriptor_6aab92ca59e015f8, []int{36} } func (m *MaxRunsExceeded) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3131,7 +3185,7 @@ func (m *JobRunPreemptedError) Reset() { *m = JobRunPreemptedError{} } func (m *JobRunPreemptedError) String() string { return proto.CompactTextString(m) } func (*JobRunPreemptedError) ProtoMessage() {} func (*JobRunPreemptedError) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{36} + return fileDescriptor_6aab92ca59e015f8, []int{37} } func (m *JobRunPreemptedError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3175,7 +3229,7 @@ func (m *GangJobUnschedulable) Reset() { *m = GangJobUnschedulable{} } func (m *GangJobUnschedulable) String() string { return proto.CompactTextString(m) } func (*GangJobUnschedulable) ProtoMessage() {} func (*GangJobUnschedulable) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{37} + return fileDescriptor_6aab92ca59e015f8, []int{38} } func (m *GangJobUnschedulable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3219,7 +3273,7 @@ func (m *JobRejected) Reset() { *m = JobRejected{} } func (m *JobRejected) String() string { return proto.CompactTextString(m) } func (*JobRejected) ProtoMessage() {} func (*JobRejected) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{38} + return fileDescriptor_6aab92ca59e015f8, []int{39} } func (m *JobRejected) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3266,7 +3320,7 @@ func (m *JobRunPreempted) Reset() { *m = JobRunPreempted{} } func (m *JobRunPreempted) String() string { return proto.CompactTextString(m) } func (*JobRunPreempted) ProtoMessage() {} func (*JobRunPreempted) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{39} + return fileDescriptor_6aab92ca59e015f8, []int{40} } func (m *JobRunPreempted) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3382,7 @@ func (m *PartitionMarker) Reset() { *m = PartitionMarker{} } func (m *PartitionMarker) String() string { return proto.CompactTextString(m) } func (*PartitionMarker) ProtoMessage() {} func (*PartitionMarker) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{40} + return fileDescriptor_6aab92ca59e015f8, []int{41} } func (m *PartitionMarker) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3381,7 +3435,7 @@ func (m *JobRunPreemptionRequested) Reset() { *m = JobRunPreemptionReque func (m *JobRunPreemptionRequested) String() string { return proto.CompactTextString(m) } func (*JobRunPreemptionRequested) ProtoMessage() {} func (*JobRunPreemptionRequested) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{41} + return fileDescriptor_6aab92ca59e015f8, []int{42} } func (m *JobRunPreemptionRequested) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3434,7 +3488,7 @@ func (m *JobPreemptionRequested) Reset() { *m = JobPreemptionRequested{} func (m *JobPreemptionRequested) String() string { return proto.CompactTextString(m) } func (*JobPreemptionRequested) ProtoMessage() {} func (*JobPreemptionRequested) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{42} + return fileDescriptor_6aab92ca59e015f8, []int{43} } func (m *JobPreemptionRequested) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3487,7 +3541,7 @@ func (m *JobValidated) Reset() { *m = JobValidated{} } func (m *JobValidated) String() string { return proto.CompactTextString(m) } func (*JobValidated) ProtoMessage() {} func (*JobValidated) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{43} + return fileDescriptor_6aab92ca59e015f8, []int{44} } func (m *JobValidated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3541,7 +3595,7 @@ func (m *JobRunCancelled) Reset() { *m = JobRunCancelled{} } func (m *JobRunCancelled) String() string { return proto.CompactTextString(m) } func (*JobRunCancelled) ProtoMessage() {} func (*JobRunCancelled) Descriptor() ([]byte, []int) { - return fileDescriptor_6aab92ca59e015f8, []int{44} + return fileDescriptor_6aab92ca59e015f8, []int{45} } func (m *JobRunCancelled) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3593,6 +3647,7 @@ func init() { proto.RegisterMapType((map[string]*resource.Quantity)(nil), "armadaevents.ResourceUtilisation.MaxResourcesForPeriodEntry") proto.RegisterMapType((map[string]*resource.Quantity)(nil), "armadaevents.ResourceUtilisation.TotalCumulativeUsageEntry") proto.RegisterType((*SubmitJob)(nil), "armadaevents.SubmitJob") + proto.RegisterType((*ExperimentalPriceInfo)(nil), "armadaevents.ExperimentalPriceInfo") proto.RegisterType((*KubernetesMainObject)(nil), "armadaevents.KubernetesMainObject") proto.RegisterType((*KubernetesObject)(nil), "armadaevents.KubernetesObject") proto.RegisterType((*ObjectMeta)(nil), "armadaevents.ObjectMeta") @@ -3644,233 +3699,237 @@ func init() { func init() { proto.RegisterFile("pkg/armadaevents/events.proto", fileDescriptor_6aab92ca59e015f8) } var fileDescriptor_6aab92ca59e015f8 = []byte{ - // 3606 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x3b, 0x4d, 0x6f, 0x1b, 0xd7, - 0xb5, 0x1e, 0x7e, 0xf3, 0x50, 0x1f, 0xf4, 0xd5, 0x87, 0x69, 0x25, 0x16, 0x65, 0x3a, 0xef, 0xc5, - 0x0e, 0x12, 0xca, 0x71, 0x5e, 0x1e, 0xf2, 0xf1, 0x90, 0x40, 0xb4, 0x15, 0xdb, 0x8a, 0x65, 0x2b, - 0x94, 0x9d, 0xe7, 0xf7, 0x10, 0x80, 0x19, 0x72, 0xae, 0xa8, 0xb1, 0xc8, 0x99, 0xc9, 0x7c, 0x28, - 0x12, 0x10, 0xa0, 0x49, 0x91, 0x76, 0x9d, 0x4d, 0x81, 0xa2, 0x9b, 0x66, 0xd3, 0x45, 0x8a, 0x76, - 0xd9, 0x6e, 0xbb, 0xed, 0xa2, 0x28, 0xb2, 0x29, 0x50, 0x14, 0x0d, 0x51, 0x24, 0xe8, 0x86, 0x8b, - 0xfe, 0x86, 0xe2, 0x7e, 0xcc, 0xcc, 0xbd, 0xc3, 0x4b, 0x8b, 0x72, 0x2c, 0x23, 0x4d, 0x57, 0xd2, - 0x9c, 0xef, 0xb9, 0xe7, 0xdc, 0x33, 0xe7, 0x9c, 0x7b, 0x09, 0xe7, 0x9c, 0xbd, 0xee, 0xaa, 0xee, - 0xf6, 0x75, 0x43, 0xc7, 0xfb, 0xd8, 0xf2, 0xbd, 0x55, 0xf6, 0xa7, 0xee, 0xb8, 0xb6, 0x6f, 0xa3, - 0x29, 0x11, 0xb5, 0x54, 0xdb, 0x7b, 0xc5, 0xab, 0x9b, 0xf6, 0xaa, 0xee, 0x98, 0xab, 0x1d, 0xdb, - 0xc5, 0xab, 0xfb, 0x2f, 0xae, 0x76, 0xb1, 0x85, 0x5d, 0xdd, 0xc7, 0x06, 0xe3, 0x58, 0xba, 0x28, - 0xd0, 0x58, 0xd8, 0xff, 0xd0, 0x76, 0xf7, 0x4c, 0xab, 0xab, 0xa2, 0xac, 0x76, 0x6d, 0xbb, 0xdb, - 0xc3, 0xab, 0xf4, 0xa9, 0x1d, 0xec, 0xac, 0xfa, 0x66, 0x1f, 0x7b, 0xbe, 0xde, 0x77, 0x38, 0xc1, - 0x7f, 0xc5, 0xa2, 0xfa, 0x7a, 0x67, 0xd7, 0xb4, 0xb0, 0x7b, 0xb8, 0x4a, 0xed, 0x75, 0xcc, 0x55, - 0x17, 0x7b, 0x76, 0xe0, 0x76, 0xf0, 0x88, 0xd8, 0xd7, 0x4c, 0xcb, 0xc7, 0xae, 0xa5, 0xf7, 0x56, - 0xbd, 0xce, 0x2e, 0x36, 0x82, 0x1e, 0x76, 0xe3, 0xff, 0xec, 0xf6, 0x03, 0xdc, 0xf1, 0xbd, 0x11, - 0x00, 0xe3, 0xad, 0x7d, 0xb5, 0x00, 0xd3, 0xeb, 0xe4, 0x5d, 0xb7, 0xf1, 0x07, 0x01, 0xb6, 0x3a, - 0x18, 0x5d, 0x82, 0xec, 0x07, 0x01, 0x0e, 0x70, 0x45, 
0x5b, 0xd1, 0x2e, 0x16, 0x1b, 0x73, 0xc3, - 0x41, 0x75, 0x96, 0x02, 0x9e, 0xb7, 0xfb, 0xa6, 0x8f, 0xfb, 0x8e, 0x7f, 0xd8, 0x64, 0x14, 0xe8, - 0x35, 0x98, 0x7a, 0x60, 0xb7, 0x5b, 0x1e, 0xf6, 0x5b, 0x96, 0xde, 0xc7, 0x95, 0x14, 0xe5, 0xa8, - 0x0c, 0x07, 0xd5, 0xf9, 0x07, 0x76, 0x7b, 0x1b, 0xfb, 0xb7, 0xf5, 0xbe, 0xc8, 0x06, 0x31, 0x14, - 0xbd, 0x00, 0xf9, 0xc0, 0xc3, 0x6e, 0xcb, 0x34, 0x2a, 0x69, 0xca, 0x36, 0x3f, 0x1c, 0x54, 0xcb, - 0x04, 0x74, 0xd3, 0x10, 0x58, 0x72, 0x0c, 0x82, 0x9e, 0x87, 0x5c, 0xd7, 0xb5, 0x03, 0xc7, 0xab, - 0x64, 0x56, 0xd2, 0x21, 0x35, 0x83, 0x88, 0xd4, 0x0c, 0x82, 0xee, 0x40, 0x8e, 0x39, 0xb0, 0x92, - 0x5d, 0x49, 0x5f, 0x2c, 0x5d, 0x39, 0x5f, 0x17, 0xbd, 0x5a, 0x97, 0x5e, 0x98, 0x3d, 0x31, 0x81, - 0x0c, 0x2f, 0x0a, 0xe4, 0x71, 0xf0, 0xdb, 0x39, 0xc8, 0x52, 0x3a, 0xf4, 0x36, 0xe4, 0x3b, 0x2e, - 0x26, 0xab, 0x5f, 0x41, 0x2b, 0xda, 0xc5, 0xd2, 0x95, 0xa5, 0x3a, 0xf3, 0x6a, 0x3d, 0xf4, 0x6a, - 0xfd, 0x6e, 0xe8, 0xd5, 0xc6, 0xc2, 0x70, 0x50, 0x3d, 0xcd, 0xc9, 0x05, 0xa9, 0xa1, 0x04, 0xb4, - 0x05, 0x45, 0x2f, 0x68, 0xf7, 0x4d, 0x7f, 0xc3, 0x6e, 0xd3, 0xf5, 0x2e, 0x5d, 0x39, 0x23, 0x9b, - 0xba, 0x1d, 0xa2, 0x1b, 0x67, 0x86, 0x83, 0xea, 0x5c, 0x44, 0x1d, 0x4b, 0xbb, 0x71, 0xaa, 0x19, - 0x0b, 0x41, 0xbb, 0x30, 0xeb, 0x62, 0xc7, 0x35, 0x6d, 0xd7, 0xf4, 0x4d, 0x0f, 0x13, 0xb9, 0x29, - 0x2a, 0xf7, 0x9c, 0x2c, 0xb7, 0x29, 0x13, 0x35, 0xce, 0x0d, 0x07, 0xd5, 0xb3, 0x09, 0x4e, 0x49, - 0x47, 0x52, 0x2c, 0xf2, 0x01, 0x25, 0x40, 0xdb, 0xd8, 0xa7, 0xbe, 0x2c, 0x5d, 0x59, 0x79, 0xa8, - 0xb2, 0x6d, 0xec, 0x37, 0x56, 0x86, 0x83, 0xea, 0xd3, 0xa3, 0xfc, 0x92, 0x4a, 0x85, 0x7c, 0xd4, - 0x83, 0xb2, 0x08, 0x35, 0xc8, 0x0b, 0x66, 0xa8, 0xce, 0xe5, 0xf1, 0x3a, 0x09, 0x55, 0x63, 0x79, - 0x38, 0xa8, 0x2e, 0x25, 0x79, 0x25, 0x7d, 0x23, 0x92, 0x89, 0x7f, 0x3a, 0xba, 0xd5, 0xc1, 0x3d, - 0xa2, 0x26, 0xab, 0xf2, 0xcf, 0xd5, 0x10, 0xcd, 0xfc, 0x13, 0x51, 0xcb, 0xfe, 0x89, 0xc0, 0xe8, - 0x3d, 0x98, 0x8a, 0x1e, 0xc8, 0x7a, 0xe5, 0x78, 0x0c, 0xa9, 0x85, 0x92, 0x95, 0x5a, 0x1a, 0x0e, - 0xaa, 0x8b, 0x22, 0x8f, 0x24, 0x5a, 0x92, 0x16, 0x4b, 0xef, 0xb1, 0x95, 0xc9, 0x8f, 0x97, 0xce, - 0x28, 0x44, 0xe9, 0xbd, 0xd1, 0x15, 0x91, 0xa4, 0x11, 0xe9, 0x64, 0x03, 0x07, 0x9d, 0x0e, 0xc6, - 0x06, 0x36, 0x2a, 0x05, 0x95, 0xf4, 0x0d, 0x81, 0x82, 0x49, 0x17, 0x79, 0x64, 0xe9, 0x22, 0x86, - 0xac, 0xf5, 0x03, 0xbb, 0xbd, 0xee, 0xba, 0xb6, 0xeb, 0x55, 0x8a, 0xaa, 0xb5, 0xde, 0x08, 0xd1, - 0x6c, 0xad, 0x23, 0x6a, 0x79, 0xad, 0x23, 0x30, 0xb7, 0xb7, 0x19, 0x58, 0xb7, 0xb0, 0xee, 0x61, - 0xa3, 0x02, 0x63, 0xec, 0x8d, 0x28, 0x22, 0x7b, 0x23, 0xc8, 0x88, 0xbd, 0x11, 0x06, 0x19, 0x30, - 0xc3, 0x9e, 0xd7, 0x3c, 0xcf, 0xec, 0x5a, 0xd8, 0xa8, 0x94, 0xa8, 0xfc, 0xa7, 0x55, 0xf2, 0x43, - 0x9a, 0xc6, 0xd3, 0xc3, 0x41, 0xb5, 0x22, 0xf3, 0x49, 0x3a, 0x12, 0x32, 0xd1, 0xfb, 0x30, 0xcd, - 0x20, 0xcd, 0xc0, 0xb2, 0x4c, 0xab, 0x5b, 0x99, 0xa2, 0x4a, 0x9e, 0x52, 0x29, 0xe1, 0x24, 0x8d, - 0xa7, 0x86, 0x83, 0xea, 0x19, 0x89, 0x4b, 0x52, 0x21, 0x0b, 0x24, 0x19, 0x83, 0x01, 0x62, 0xc7, - 0x4e, 0xab, 0x32, 0xc6, 0x86, 0x4c, 0xc4, 0x32, 0x46, 0x82, 0x53, 0xce, 0x18, 0x09, 0x64, 0xec, - 0x0f, 0xee, 0xe4, 0x99, 0xf1, 0xfe, 0xe0, 0x7e, 0x16, 0xfc, 0xa1, 0x70, 0xb5, 0x24, 0x0d, 0x7d, - 0xac, 0xc1, 0x82, 0xe7, 0xeb, 0x96, 0xa1, 0xf7, 0x6c, 0x0b, 0xdf, 0xb4, 0xba, 0x2e, 0xf6, 0xbc, - 0x9b, 0xd6, 0x8e, 0x5d, 0x29, 0x53, 0x3d, 0x17, 0x12, 0x89, 0x55, 0x45, 0xda, 0xb8, 0x30, 0x1c, - 0x54, 0xab, 0x4a, 0x29, 0x92, 0x66, 0xb5, 0x22, 0x74, 0x00, 0x73, 0xe1, 0x47, 0xfa, 0x9e, 0x6f, - 0xf6, 0x4c, 0x4f, 0xf7, 0x4d, 0xdb, 0xaa, 0x9c, 0xa6, 0xfa, 0xcf, 0x27, 0xf3, 
0xd3, 0x08, 0x61, - 0xe3, 0xfc, 0x70, 0x50, 0x3d, 0xa7, 0x90, 0x20, 0xe9, 0x56, 0xa9, 0x88, 0x9d, 0xb8, 0xe5, 0x62, - 0x42, 0x88, 0x8d, 0xca, 0xdc, 0x78, 0x27, 0x46, 0x44, 0xa2, 0x13, 0x23, 0xa0, 0xca, 0x89, 0x11, - 0x92, 0x68, 0x72, 0x74, 0xd7, 0x37, 0x89, 0xda, 0x4d, 0xdd, 0xdd, 0xc3, 0x6e, 0x65, 0x5e, 0xa5, - 0x69, 0x4b, 0x26, 0x62, 0x9a, 0x12, 0x9c, 0xb2, 0xa6, 0x04, 0x12, 0x7d, 0xa6, 0x81, 0x6c, 0x9a, - 0x69, 0x5b, 0x4d, 0xf2, 0xd1, 0xf6, 0xc8, 0xeb, 0x2d, 0x50, 0xa5, 0xcf, 0x3e, 0xe4, 0xf5, 0x44, - 0xf2, 0xc6, 0xb3, 0xc3, 0x41, 0xf5, 0xc2, 0x58, 0x69, 0x92, 0x21, 0xe3, 0x95, 0xa2, 0xfb, 0x50, - 0x22, 0x48, 0x4c, 0xcb, 0x1f, 0xa3, 0xb2, 0x48, 0x6d, 0x38, 0x3b, 0x6a, 0x03, 0x27, 0x68, 0x9c, - 0x1d, 0x0e, 0xaa, 0x0b, 0x02, 0x87, 0xa4, 0x47, 0x14, 0x85, 0x3e, 0xd5, 0x80, 0x04, 0xba, 0xea, - 0x4d, 0xcf, 0x50, 0x2d, 0xcf, 0x8c, 0x68, 0x51, 0xbd, 0xe6, 0x33, 0xc3, 0x41, 0x75, 0x45, 0x2d, - 0x47, 0xd2, 0x3d, 0x46, 0x57, 0x1c, 0x47, 0xd1, 0x47, 0xa2, 0x52, 0x19, 0x1f, 0x47, 0x11, 0x91, - 0x18, 0x47, 0x11, 0x50, 0x15, 0x47, 0x11, 0x92, 0x27, 0x83, 0x77, 0xf5, 0x9e, 0x69, 0xd0, 0x62, - 0xea, 0xec, 0x98, 0x64, 0x10, 0x51, 0x44, 0xc9, 0x20, 0x82, 0x8c, 0x24, 0x83, 0x98, 0x36, 0x0f, - 0x59, 0x2a, 0xa2, 0xf6, 0x97, 0x1c, 0xcc, 0x29, 0xb6, 0x1a, 0xc2, 0x30, 0x1d, 0xee, 0xa3, 0x96, - 0x49, 0x92, 0x44, 0x5a, 0xb5, 0xca, 0x6f, 0x07, 0x6d, 0xec, 0x5a, 0xd8, 0xc7, 0x5e, 0x28, 0x83, - 0x66, 0x09, 0x6a, 0x89, 0x2b, 0x40, 0x84, 0xda, 0x6e, 0x4a, 0x84, 0xa3, 0x9f, 0x69, 0x50, 0xe9, - 0xeb, 0x07, 0xad, 0x10, 0xe8, 0xb5, 0x76, 0x6c, 0xb7, 0xe5, 0x60, 0xd7, 0xb4, 0x0d, 0x5a, 0xc9, - 0x96, 0xae, 0xfc, 0xcf, 0x91, 0x79, 0xa1, 0xbe, 0xa9, 0x1f, 0x84, 0x60, 0xef, 0x2d, 0xdb, 0xdd, - 0xa2, 0xec, 0xeb, 0x96, 0xef, 0x1e, 0xb2, 0x84, 0xd5, 0x57, 0xe1, 0x05, 0x9b, 0x16, 0x94, 0x04, - 0xe8, 0x27, 0x1a, 0x2c, 0xfa, 0xb6, 0xaf, 0xf7, 0x5a, 0x9d, 0xa0, 0x1f, 0xf4, 0x74, 0xdf, 0xdc, - 0xc7, 0xad, 0xc0, 0xd3, 0xbb, 0x98, 0x97, 0xcd, 0xaf, 0x1f, 0x6d, 0xda, 0x5d, 0xc2, 0x7f, 0x35, - 0x62, 0xbf, 0x47, 0xb8, 0x99, 0x65, 0xb5, 0xe1, 0xa0, 0xba, 0xec, 0x2b, 0xd0, 0x82, 0x61, 0xf3, - 0x2a, 0x3c, 0x7a, 0x0e, 0x72, 0xa4, 0xad, 0x30, 0x0d, 0x5a, 0x1d, 0xf1, 0x16, 0xe4, 0x81, 0xdd, - 0x96, 0x1a, 0x83, 0x2c, 0x05, 0x10, 0x5a, 0x37, 0xb0, 0x08, 0x6d, 0x3e, 0xa6, 0x75, 0x03, 0x4b, - 0xa6, 0xa5, 0x80, 0xa5, 0xcf, 0x35, 0x58, 0x1a, 0xbf, 0x94, 0xe8, 0x02, 0xa4, 0xf7, 0xf0, 0x21, - 0x6f, 0x7b, 0x4e, 0x0f, 0x07, 0xd5, 0xe9, 0x3d, 0x7c, 0x28, 0x48, 0x21, 0x58, 0xf4, 0x7f, 0x90, - 0xdd, 0xd7, 0x7b, 0x01, 0xe6, 0x55, 0x75, 0xbd, 0xce, 0x3a, 0xb6, 0xba, 0xd8, 0xb1, 0xd5, 0x9d, - 0xbd, 0x2e, 0x01, 0xd4, 0x43, 0xaf, 0xd7, 0xdf, 0x09, 0x74, 0xcb, 0x37, 0xfd, 0x43, 0x66, 0x1e, - 0x15, 0x20, 0x9a, 0x47, 0x01, 0xaf, 0xa5, 0x5e, 0xd1, 0x96, 0x7e, 0xae, 0xc1, 0xd9, 0xb1, 0x4b, - 0xfa, 0x5d, 0xb0, 0x70, 0x23, 0x53, 0xd0, 0xca, 0xa9, 0x8d, 0x4c, 0x21, 0x55, 0x4e, 0xd7, 0x7e, - 0x95, 0x83, 0x62, 0xd4, 0xa0, 0xa0, 0x1b, 0x50, 0x36, 0xb0, 0x11, 0x38, 0x3d, 0xb3, 0x43, 0x63, - 0x83, 0x38, 0x85, 0x75, 0x84, 0x34, 0x3b, 0x48, 0x38, 0xc9, 0x3d, 0xb3, 0x09, 0x14, 0xba, 0x02, - 0x05, 0x5e, 0x88, 0x1f, 0xd2, 0x7d, 0x39, 0xdd, 0x58, 0x1c, 0x0e, 0xaa, 0x28, 0x84, 0x09, 0xac, - 0x11, 0x1d, 0x6a, 0x02, 0xb0, 0xce, 0x76, 0x13, 0xfb, 0x3a, 0x6f, 0x09, 0x2a, 0x72, 0xfc, 0xde, - 0x89, 0xf0, 0xac, 0x47, 0x8d, 0xe9, 0xc5, 0x1e, 0x35, 0x86, 0xa2, 0xf7, 0x00, 0xfa, 0xba, 0x69, - 0x31, 0x3e, 0x5e, 0xff, 0xd7, 0xc6, 0x65, 0x88, 0xcd, 0x88, 0x92, 0x49, 0x8f, 0x39, 0x45, 0xe9, - 0x31, 0x14, 0xdd, 0x81, 0x3c, 0xef, 0xc5, 0x2b, 0x39, 0xba, 0xdd, 0x96, 0xc7, 0x89, 0xe6, 0x62, - 0x69, 
0x37, 0xc9, 0x59, 0xc4, 0x6e, 0x92, 0x83, 0xc8, 0xb2, 0xf5, 0xcc, 0x1d, 0xec, 0x9b, 0x7d, - 0x4c, 0x77, 0x03, 0x5f, 0xb6, 0x10, 0x26, 0x2e, 0x5b, 0x08, 0x43, 0xaf, 0x00, 0xe8, 0xfe, 0xa6, - 0xed, 0xf9, 0x77, 0xac, 0x0e, 0xa6, 0x15, 0x7d, 0x81, 0x99, 0x1f, 0x43, 0x45, 0xf3, 0x63, 0x28, - 0x7a, 0x1d, 0x4a, 0x0e, 0xff, 0x82, 0xb4, 0x7b, 0x98, 0x56, 0xec, 0x05, 0xf6, 0xc1, 0x13, 0xc0, - 0x02, 0xaf, 0x48, 0x8d, 0xae, 0xc3, 0x6c, 0xc7, 0xb6, 0x3a, 0x81, 0xeb, 0x62, 0xab, 0x73, 0xb8, - 0xad, 0xef, 0x60, 0x5a, 0x9d, 0x17, 0x58, 0xa8, 0x24, 0x50, 0x62, 0xa8, 0x24, 0x50, 0xe8, 0x65, - 0x28, 0x46, 0x93, 0x0d, 0x5a, 0x80, 0x17, 0x79, 0xa3, 0x1c, 0x02, 0x05, 0xe6, 0x98, 0x92, 0x18, - 0x6f, 0x7a, 0xd7, 0x78, 0xd0, 0x61, 0x5a, 0x54, 0x73, 0xe3, 0x05, 0xb0, 0x68, 0xbc, 0x00, 0x16, - 0xf2, 0xd3, 0xcc, 0x51, 0xf9, 0x29, 0xda, 0x2e, 0xd3, 0xe5, 0x99, 0xda, 0x1f, 0x34, 0x98, 0x57, - 0xc5, 0x4b, 0x22, 0x76, 0xb5, 0xc7, 0x12, 0xbb, 0xef, 0x42, 0xc1, 0xb1, 0x8d, 0x96, 0xe7, 0xe0, - 0x0e, 0xcf, 0x04, 0x89, 0xc8, 0xdd, 0xb2, 0x8d, 0x6d, 0x07, 0x77, 0xfe, 0xd7, 0xf4, 0x77, 0xd7, - 0xf6, 0x6d, 0xd3, 0xb8, 0x65, 0x7a, 0x3c, 0xc4, 0x1c, 0x86, 0x91, 0x3e, 0xaf, 0x79, 0x0e, 0x6c, - 0x14, 0x20, 0xc7, 0xb4, 0xd4, 0xfe, 0x98, 0x86, 0x72, 0x32, 0x46, 0xff, 0x95, 0x5e, 0x05, 0xdd, - 0x87, 0xbc, 0xc9, 0xaa, 0x77, 0xfe, 0xf5, 0xff, 0x0f, 0x21, 0x57, 0xd6, 0xe3, 0x51, 0x5e, 0x7d, - 0xff, 0xc5, 0x3a, 0x2f, 0xf3, 0xe9, 0x12, 0x50, 0xc9, 0x9c, 0x53, 0x96, 0xcc, 0x81, 0xa8, 0x09, - 0x79, 0x0f, 0xbb, 0xfb, 0x66, 0x07, 0xf3, 0x4c, 0x54, 0x15, 0x25, 0x77, 0x6c, 0x17, 0x13, 0x99, - 0xdb, 0x8c, 0x24, 0x96, 0xc9, 0x79, 0x64, 0x99, 0x1c, 0x88, 0xde, 0x85, 0x62, 0xc7, 0xb6, 0x76, - 0xcc, 0xee, 0xa6, 0xee, 0xf0, 0x5c, 0x74, 0x4e, 0x25, 0xf5, 0x6a, 0x48, 0xc4, 0x27, 0x12, 0xe1, - 0x63, 0x62, 0x22, 0x11, 0x51, 0xc5, 0x0e, 0xfd, 0x47, 0x06, 0x20, 0x76, 0x0e, 0x7a, 0x15, 0x4a, - 0xf8, 0x00, 0x77, 0x02, 0xdf, 0xa6, 0x53, 0x3a, 0x2d, 0x1e, 0xee, 0x85, 0x60, 0x29, 0xe0, 0x21, - 0x86, 0x92, 0x5d, 0x69, 0xe9, 0x7d, 0xec, 0x39, 0x7a, 0x27, 0x9c, 0x0a, 0x52, 0x63, 0x22, 0xa0, - 0xb8, 0x2b, 0x23, 0x20, 0xfa, 0x4f, 0xc8, 0xd0, 0x39, 0x22, 0x1b, 0x08, 0xa2, 0xe1, 0xa0, 0x3a, - 0x63, 0xc9, 0x13, 0x44, 0x8a, 0x47, 0x6f, 0xc2, 0xf4, 0x5e, 0x14, 0x78, 0xc4, 0xb6, 0x0c, 0x65, - 0xa0, 0x65, 0x59, 0x8c, 0x90, 0xac, 0x9b, 0x12, 0xe1, 0x68, 0x07, 0x4a, 0xba, 0x65, 0xd9, 0x3e, - 0xfd, 0xe0, 0x84, 0x43, 0xc2, 0x4b, 0xe3, 0xc2, 0xb4, 0xbe, 0x16, 0xd3, 0xb2, 0xda, 0x86, 0x66, - 0x0a, 0x41, 0x82, 0x98, 0x29, 0x04, 0x30, 0x6a, 0x42, 0xae, 0xa7, 0xb7, 0x71, 0x2f, 0xcc, 0xf0, - 0xcf, 0x8c, 0x55, 0x71, 0x8b, 0x92, 0x31, 0xe9, 0x74, 0x14, 0xc9, 0xf8, 0xc4, 0x51, 0x24, 0x83, - 0x2c, 0xed, 0x40, 0x39, 0x69, 0xcf, 0x64, 0x85, 0xc1, 0x25, 0xb1, 0x30, 0x28, 0x1e, 0x59, 0x8a, - 0xe8, 0x50, 0x12, 0x8c, 0x3a, 0x09, 0x15, 0xb5, 0x2f, 0x34, 0x98, 0x57, 0xed, 0x5d, 0xb4, 0x29, - 0xec, 0x78, 0x8d, 0x0f, 0x3c, 0x14, 0xa1, 0xce, 0x79, 0xc7, 0x6c, 0xf5, 0x78, 0xa3, 0x37, 0x60, - 0xc6, 0xb2, 0x0d, 0xdc, 0xd2, 0x89, 0x82, 0x9e, 0xe9, 0xf9, 0x95, 0x14, 0x1d, 0x22, 0xd3, 0x41, - 0x09, 0xc1, 0xac, 0x85, 0x08, 0x81, 0x7b, 0x5a, 0x42, 0xd4, 0x3e, 0x84, 0xd9, 0xc4, 0x18, 0x53, - 0x2a, 0x53, 0x52, 0x13, 0x96, 0x29, 0xf1, 0xb7, 0x23, 0x3d, 0xd9, 0xb7, 0xa3, 0xf6, 0xa3, 0x14, - 0x94, 0x84, 0x9e, 0x12, 0x3d, 0x80, 0x59, 0xfe, 0x1d, 0x33, 0xad, 0x2e, 0xeb, 0x5d, 0x52, 0x7c, - 0xc0, 0x31, 0x32, 0xe3, 0xdf, 0xb0, 0xdb, 0xdb, 0x11, 0x2d, 0x6d, 0x5d, 0xe8, 0xfc, 0xc9, 0x93, - 0x60, 0x82, 0xe2, 0x19, 0x19, 0x83, 0xee, 0xc3, 0x62, 0xe0, 0x90, 0x8e, 0xaa, 0xe5, 0xf1, 0x69, - 0x79, 0xcb, 0x0a, 0xfa, 0x6d, 
0xec, 0x52, 0xeb, 0xb3, 0xac, 0xc6, 0x67, 0x14, 0xe1, 0x38, 0xfd, - 0x36, 0xc5, 0x8b, 0x35, 0xbe, 0x0a, 0x2f, 0xac, 0x43, 0x66, 0xc2, 0x75, 0xb8, 0x01, 0x68, 0x74, - 0x8e, 0x2c, 0xf9, 0x40, 0x9b, 0xcc, 0x07, 0xb5, 0x03, 0x28, 0x27, 0xa7, 0xc3, 0x4f, 0xc8, 0x97, - 0x7b, 0x50, 0x8c, 0x66, 0xbb, 0xe8, 0x79, 0xc8, 0xb9, 0x58, 0xf7, 0x6c, 0x8b, 0xef, 0x16, 0xba, - 0xed, 0x19, 0x44, 0xdc, 0xf6, 0x0c, 0xf2, 0x08, 0xca, 0xee, 0xc2, 0x14, 0x5b, 0xa4, 0xb7, 0xcc, - 0x9e, 0x8f, 0x5d, 0x74, 0x0d, 0x72, 0x9e, 0xaf, 0xfb, 0xd8, 0xab, 0x68, 0x2b, 0xe9, 0x8b, 0x33, - 0x57, 0x16, 0x47, 0x07, 0xb7, 0x04, 0xcd, 0xec, 0x60, 0x94, 0xa2, 0x1d, 0x0c, 0x52, 0xfb, 0xa1, - 0x06, 0x53, 0xe2, 0x7c, 0xfa, 0xf1, 0x88, 0x3d, 0xde, 0x62, 0xd4, 0xac, 0xd0, 0x06, 0x3e, 0x99, - 0x3e, 0xe9, 0xa5, 0xfc, 0x42, 0x63, 0x6b, 0x19, 0x8d, 0x32, 0xbb, 0xf1, 0xf8, 0x80, 0x6c, 0x14, - 0x8f, 0x26, 0x94, 0x49, 0xc7, 0x07, 0x34, 0xed, 0x48, 0xec, 0x62, 0xda, 0x91, 0x10, 0x8f, 0x60, - 0xeb, 0xe7, 0x59, 0x6a, 0x6b, 0x3c, 0xa8, 0x4e, 0x7c, 0xc7, 0xd3, 0xc7, 0xf8, 0x8e, 0xbf, 0x00, - 0x79, 0x9a, 0x38, 0xa3, 0x6d, 0x4a, 0x17, 0x96, 0x80, 0xe4, 0x43, 0x3a, 0x06, 0x79, 0x48, 0xba, - 0xc8, 0x7e, 0xcb, 0x74, 0xd1, 0x82, 0xb3, 0xbb, 0xba, 0xd7, 0x0a, 0x13, 0x9c, 0xd1, 0xd2, 0xfd, - 0x56, 0xb4, 0x5f, 0x73, 0xb4, 0x7a, 0xa7, 0xa3, 0xaf, 0x5d, 0xdd, 0xdb, 0x0e, 0x69, 0xd6, 0xfc, - 0xad, 0xd1, 0xdd, 0xbb, 0xa8, 0xa6, 0x40, 0xf7, 0x60, 0x41, 0x2d, 0x3c, 0x4f, 0x2d, 0xa7, 0x93, - 0x59, 0xef, 0xa1, 0x92, 0xe7, 0x14, 0x68, 0xf4, 0x89, 0x06, 0x15, 0xf2, 0x25, 0x73, 0xf1, 0x07, - 0x81, 0xe9, 0xe2, 0x3e, 0x09, 0x8b, 0x96, 0xbd, 0x8f, 0xdd, 0x9e, 0x7e, 0xc8, 0x0f, 0x39, 0xce, - 0x8f, 0xa6, 0xed, 0x2d, 0xdb, 0x68, 0x0a, 0x0c, 0xec, 0xd5, 0x1c, 0x19, 0x78, 0x87, 0x09, 0x11, - 0x5f, 0x4d, 0x4d, 0x21, 0x84, 0x10, 0x1c, 0x63, 0x9c, 0x52, 0x3a, 0x6a, 0x9c, 0x42, 0xaa, 0x35, - 0xc7, 0xb6, 0x7b, 0xb4, 0x79, 0xe2, 0xd5, 0x1a, 0x79, 0x16, 0xab, 0x35, 0xf2, 0x2c, 0x4e, 0x0c, - 0x36, 0x32, 0x85, 0x42, 0xb9, 0x58, 0xfb, 0x4a, 0x83, 0x19, 0xf9, 0x5c, 0x64, 0x74, 0x43, 0xa5, - 0x4f, 0x7c, 0x43, 0x65, 0x8e, 0xb1, 0x1a, 0xd9, 0xa3, 0x56, 0x43, 0x9a, 0x8b, 0xfc, 0x55, 0x83, - 0x69, 0xe9, 0x48, 0xe6, 0xfb, 0xf5, 0x7a, 0x3f, 0x4d, 0xc1, 0xa2, 0xda, 0xd4, 0x13, 0x69, 0xff, - 0x6e, 0x00, 0x29, 0xe4, 0x6e, 0xc6, 0x85, 0xce, 0xc2, 0x48, 0xf7, 0x47, 0x97, 0x29, 0xac, 0x02, - 0x47, 0x4e, 0x6b, 0x42, 0x76, 0x74, 0x1f, 0x4a, 0xa6, 0x70, 0x2e, 0x94, 0x56, 0x8d, 0xef, 0xc5, - 0xd3, 0x20, 0x36, 0x10, 0x18, 0x73, 0x06, 0x24, 0x8a, 0x6a, 0xe4, 0x20, 0x43, 0x2a, 0xb1, 0xda, - 0x3e, 0xe4, 0xb9, 0x39, 0xe8, 0x25, 0x28, 0xd2, 0xdc, 0x49, 0x3b, 0x1a, 0x56, 0x36, 0xd3, 0x92, - 0x82, 0x00, 0x13, 0xf7, 0x22, 0x0a, 0x21, 0x0c, 0xfd, 0x37, 0x00, 0x49, 0x17, 0x3c, 0x6b, 0xa6, - 0x68, 0xee, 0xa1, 0x9d, 0x93, 0x63, 0x1b, 0x23, 0xa9, 0xb2, 0x18, 0x01, 0x6b, 0xbf, 0x4e, 0x41, - 0x49, 0x3c, 0x89, 0x7a, 0x24, 0xe5, 0x1f, 0x41, 0xd8, 0xd5, 0xb6, 0x74, 0xc3, 0x20, 0x7f, 0x71, - 0xf8, 0x61, 0x5b, 0x1d, 0xbb, 0x48, 0xe1, 0xff, 0x6b, 0x21, 0x07, 0xeb, 0x61, 0xe8, 0x69, 0xbb, - 0x99, 0x40, 0x09, 0x5a, 0xcb, 0x49, 0xdc, 0xd2, 0x1e, 0x2c, 0x28, 0x45, 0x89, 0x9d, 0x47, 0xf6, - 0x71, 0x75, 0x1e, 0xbf, 0xc8, 0xc2, 0x82, 0xf2, 0x04, 0x30, 0x11, 0xc1, 0xe9, 0xc7, 0x12, 0xc1, - 0x3f, 0xd6, 0x54, 0x2b, 0xcb, 0xc6, 0xff, 0xaf, 0x4e, 0x70, 0x2c, 0xf9, 0xb8, 0xd6, 0x58, 0x0e, - 0x8b, 0xec, 0x23, 0xc5, 0x64, 0x6e, 0xd2, 0x98, 0x44, 0x97, 0x59, 0x13, 0x47, 0x75, 0xb1, 0xe1, - 0x7c, 0xb8, 0x43, 0x13, 0xaa, 0xf2, 0x1c, 0x44, 0xfa, 0xfa, 0x90, 0x83, 0x8d, 0x0e, 0x0a, 0x71, - 0x5f, 0xcf, 0x69, 0x92, 0xd3, 0x83, 0x29, 0x11, 0x2e, 
0x64, 0xbf, 0xe2, 0x31, 0xb2, 0x1f, 0x1c, - 0x79, 0x72, 0xf0, 0x24, 0x63, 0x53, 0x4a, 0xb5, 0x03, 0x0d, 0x66, 0x13, 0x07, 0xef, 0xdf, 0xaf, - 0x6f, 0xc9, 0xc7, 0x1a, 0x14, 0xa3, 0x7b, 0x1d, 0x68, 0x0d, 0x72, 0x98, 0xdd, 0x0d, 0x60, 0x69, - 0x67, 0x2e, 0x71, 0x6f, 0x8b, 0xe0, 0xf8, 0x4d, 0xad, 0xc4, 0x75, 0x80, 0x26, 0x67, 0x7c, 0x84, - 0x82, 0xf9, 0x37, 0x5a, 0x58, 0x30, 0x8f, 0x58, 0x91, 0xfe, 0xf6, 0x56, 0x9c, 0xdc, 0xd2, 0xfd, - 0xae, 0x08, 0x59, 0x6a, 0x0b, 0x69, 0x5e, 0x7d, 0xec, 0xf6, 0x4d, 0x4b, 0xef, 0xd1, 0x50, 0x2c, - 0xb0, 0x5d, 0x1d, 0xc2, 0xc4, 0x5d, 0x1d, 0xc2, 0xd0, 0x2e, 0xcc, 0xc6, 0x23, 0x31, 0x2a, 0x46, - 0x7d, 0x51, 0xec, 0x6d, 0x99, 0x88, 0x0d, 0xe8, 0x13, 0x9c, 0xf2, 0x49, 0x6f, 0x02, 0x89, 0x0c, - 0x98, 0xe9, 0xd8, 0x96, 0xaf, 0x9b, 0x16, 0x76, 0x99, 0xa2, 0xb4, 0xea, 0xa2, 0xcc, 0x55, 0x89, - 0x86, 0x0d, 0x2a, 0x64, 0x3e, 0xf9, 0xa2, 0x8c, 0x8c, 0x43, 0xef, 0xc3, 0x74, 0xd8, 0xb8, 0x30, - 0x25, 0x19, 0xd5, 0x45, 0x99, 0x75, 0x91, 0x84, 0x6d, 0x06, 0x89, 0x4b, 0xbe, 0x28, 0x23, 0xa1, - 0x50, 0x0f, 0xca, 0x8e, 0x6d, 0xdc, 0xb3, 0x78, 0xb9, 0xae, 0xb7, 0x7b, 0x98, 0xcf, 0x61, 0x97, - 0x47, 0x0a, 0x12, 0x89, 0x8a, 0x25, 0xea, 0x24, 0xaf, 0x7c, 0xf5, 0x2c, 0x89, 0x45, 0xef, 0xc1, - 0x54, 0x8f, 0xf4, 0x6f, 0xeb, 0x07, 0x8e, 0xe9, 0x62, 0x43, 0x7d, 0x51, 0xec, 0x96, 0x40, 0xc1, - 0xd2, 0xa4, 0xc8, 0x23, 0x9f, 0x8f, 0x8b, 0x18, 0xe2, 0xfd, 0xbe, 0x7e, 0xd0, 0x0c, 0x2c, 0x6f, - 0xfd, 0x80, 0x5f, 0xfa, 0xc9, 0xab, 0xbc, 0xbf, 0x29, 0x13, 0x31, 0xef, 0x27, 0x38, 0x65, 0xef, - 0x27, 0x90, 0xe8, 0x16, 0xfd, 0x0a, 0x30, 0x97, 0xb0, 0x0b, 0x63, 0x8b, 0x23, 0xab, 0xc5, 0xbc, - 0xc1, 0x06, 0x2e, 0xfc, 0x49, 0x12, 0x1a, 0x49, 0xe0, 0x3e, 0xa0, 0xaf, 0xdd, 0xc4, 0x7e, 0xe0, - 0x5a, 0xd8, 0xe0, 0x6d, 0xd4, 0xa8, 0x0f, 0x24, 0xaa, 0xc8, 0x07, 0x12, 0x74, 0xc4, 0x07, 0x12, - 0x16, 0x7d, 0x04, 0xf3, 0x89, 0xeb, 0x2f, 0xec, 0x3d, 0x4a, 0xaa, 0x43, 0x88, 0x0d, 0x05, 0x25, - 0xeb, 0x78, 0x55, 0x32, 0x24, 0xcd, 0x4a, 0x2d, 0x44, 0x7b, 0x57, 0xb7, 0xba, 0x1b, 0x76, 0x5b, - 0x8e, 0xb9, 0x29, 0x95, 0xf6, 0xeb, 0x0a, 0x4a, 0xa6, 0x5d, 0x25, 0x43, 0xd6, 0xae, 0xa2, 0x88, - 0xae, 0xba, 0x90, 0x22, 0x26, 0xba, 0x12, 0xa6, 0xba, 0xea, 0xc2, 0x08, 0x84, 0xab, 0x2e, 0x0c, - 0xa0, 0xb8, 0xea, 0xc2, 0x29, 0x0b, 0xe1, 0xb0, 0xa6, 0xf6, 0x0e, 0xcc, 0x26, 0xd2, 0x0b, 0x7a, - 0x03, 0xa2, 0x0b, 0x14, 0x77, 0x0f, 0x9d, 0xb0, 0x76, 0x95, 0x2e, 0x5c, 0x10, 0xb8, 0xea, 0xc2, - 0x05, 0x81, 0xd7, 0x3e, 0xcb, 0x40, 0x21, 0x8c, 0xa8, 0x13, 0xe9, 0x46, 0x56, 0x21, 0xdf, 0xc7, - 0x1e, 0xbd, 0x24, 0x91, 0x8a, 0x8b, 0x1a, 0x0e, 0x12, 0x8b, 0x1a, 0x0e, 0x92, 0x6b, 0xae, 0xf4, - 0x23, 0xd5, 0x5c, 0x99, 0x89, 0x6b, 0x2e, 0x4c, 0xcf, 0x55, 0x85, 0xbc, 0x18, 0x1e, 0x6e, 0x3c, - 0x3c, 0xd9, 0x86, 0xa7, 0xae, 0x22, 0x63, 0xe2, 0xd4, 0x55, 0x44, 0xa1, 0x3d, 0x38, 0x2d, 0x1c, - 0xc0, 0xf0, 0xd1, 0x1b, 0xc9, 0x50, 0x33, 0xe3, 0x0f, 0xb1, 0x9b, 0x94, 0x8a, 0xed, 0xc3, 0xbd, - 0x04, 0x54, 0x2c, 0x5a, 0x93, 0x38, 0x12, 0x12, 0x06, 0x6e, 0x07, 0xdd, 0x4d, 0xbe, 0xec, 0xf9, - 0x38, 0x24, 0x44, 0xb8, 0x18, 0x12, 0x22, 0xbc, 0xf6, 0xf7, 0x14, 0xcc, 0xc8, 0xef, 0x7b, 0x22, - 0x81, 0xf1, 0x12, 0x14, 0xf1, 0x81, 0xe9, 0xb7, 0x3a, 0xb6, 0x81, 0x79, 0xe7, 0x46, 0xfd, 0x4c, - 0x80, 0x57, 0x6d, 0x43, 0xf2, 0x73, 0x08, 0x13, 0xa3, 0x29, 0x3d, 0x51, 0x34, 0xc5, 0x93, 0xce, - 0xcc, 0x04, 0x93, 0x4e, 0xa5, 0x9f, 0x8a, 0x27, 0xe3, 0xa7, 0xda, 0x97, 0x29, 0x28, 0x27, 0xd3, - 0xee, 0x77, 0x63, 0x0b, 0xca, 0xbb, 0x29, 0x3d, 0xf1, 0x6e, 0x7a, 0x13, 0xa6, 0x49, 0x65, 0xa6, - 0xfb, 0x3e, 0xbf, 0x53, 0x99, 0xa1, 0xc5, 0x15, 0xcb, 0x46, 0x81, 0xb5, 0x16, 
0xc2, 0xa5, 0x6c, - 0x24, 0xc0, 0x47, 0x42, 0x37, 0x7b, 0xcc, 0xd0, 0xfd, 0x24, 0x05, 0xd3, 0x5b, 0xb6, 0x71, 0x97, - 0x15, 0x6d, 0xfe, 0x77, 0x65, 0x3d, 0x9f, 0x64, 0x4a, 0xab, 0xcd, 0xc2, 0xb4, 0x54, 0xb5, 0xd5, - 0x3e, 0x65, 0x71, 0x26, 0x7f, 0xae, 0xfe, 0xfd, 0xd6, 0x65, 0x06, 0xa6, 0xc4, 0xf2, 0xaf, 0xd6, - 0x80, 0xd9, 0x44, 0xb5, 0x26, 0xbe, 0x80, 0x36, 0xc9, 0x0b, 0xd4, 0xae, 0xc1, 0xbc, 0xaa, 0x8c, - 0x11, 0xb2, 0x8e, 0x36, 0xc1, 0xe9, 0xcc, 0x75, 0x98, 0x57, 0x95, 0x23, 0xc7, 0x37, 0xe7, 0x0d, - 0x7e, 0xf2, 0xc9, 0x0a, 0x87, 0xe3, 0xf3, 0xff, 0x29, 0xea, 0x9e, 0xe3, 0xfb, 0xcb, 0x6f, 0x41, - 0xd9, 0x09, 0x1f, 0x5a, 0xbc, 0x47, 0x63, 0xdb, 0x92, 0x76, 0x1c, 0x11, 0x6e, 0x23, 0xd1, 0xac, - 0xcd, 0xc8, 0x18, 0x59, 0x0e, 0xef, 0xdf, 0x72, 0x0a, 0x39, 0xcd, 0x44, 0x23, 0x37, 0x23, 0x63, - 0x84, 0xa5, 0xcd, 0x1f, 0xbd, 0xb4, 0xb4, 0xff, 0xcb, 0x92, 0xa6, 0x79, 0x36, 0x71, 0xbf, 0x1a, - 0x5d, 0x86, 0x02, 0xfd, 0xf1, 0x53, 0xdc, 0xf9, 0xd2, 0xd5, 0xa1, 0x30, 0xc9, 0x80, 0x3c, 0x07, - 0xa1, 0x97, 0xa1, 0x18, 0x5d, 0xb9, 0xe6, 0x67, 0x9e, 0x2c, 0xee, 0x42, 0xa0, 0x14, 0x77, 0x21, - 0x90, 0x37, 0xcd, 0x3f, 0x80, 0xb3, 0x63, 0x2f, 0x5b, 0x1f, 0xa7, 0x07, 0x17, 0xba, 0xdf, 0xcc, - 0xb1, 0xba, 0xdf, 0x03, 0x58, 0x54, 0xdf, 0x81, 0x16, 0xb4, 0xa7, 0x8e, 0xd4, 0x1e, 0xaf, 0x7e, - 0x7a, 0xc2, 0xd5, 0x4f, 0xd5, 0xf6, 0xe8, 0xb8, 0x20, 0xba, 0x6b, 0x8c, 0x2e, 0x41, 0xd6, 0xb1, - 0xed, 0x9e, 0xc7, 0x2f, 0x15, 0x50, 0x75, 0x14, 0x20, 0xaa, 0xa3, 0x80, 0x47, 0x18, 0x4e, 0x04, - 0x61, 0x04, 0xc7, 0x37, 0xa7, 0x9f, 0xc0, 0xea, 0x3e, 0x77, 0x19, 0x0a, 0xe1, 0xc1, 0x2d, 0x02, - 0xc8, 0xbd, 0x73, 0x6f, 0xfd, 0xde, 0xfa, 0xb5, 0xf2, 0x29, 0x54, 0x82, 0xfc, 0xd6, 0xfa, 0xed, - 0x6b, 0x37, 0x6f, 0x5f, 0x2f, 0x6b, 0xe4, 0xa1, 0x79, 0xef, 0xf6, 0x6d, 0xf2, 0x90, 0x7a, 0xee, - 0x96, 0x78, 0x19, 0x8c, 0x57, 0x6e, 0x53, 0x50, 0x58, 0x73, 0x1c, 0x9a, 0x42, 0x18, 0xef, 0xfa, - 0xbe, 0x49, 0x76, 0x72, 0x59, 0x43, 0x79, 0x48, 0xdf, 0xb9, 0xb3, 0x59, 0x4e, 0xa1, 0x79, 0x28, - 0x5f, 0xc3, 0xba, 0xd1, 0x33, 0x2d, 0x1c, 0xe6, 0xad, 0x72, 0xba, 0xf1, 0xe0, 0xf7, 0x5f, 0x2f, - 0x6b, 0x5f, 0x7e, 0xbd, 0xac, 0xfd, 0xed, 0xeb, 0x65, 0xed, 0xb3, 0x6f, 0x96, 0x4f, 0x7d, 0xf9, - 0xcd, 0xf2, 0xa9, 0x3f, 0x7f, 0xb3, 0x7c, 0xea, 0xff, 0x2f, 0x77, 0x4d, 0x7f, 0x37, 0x68, 0xd7, - 0x3b, 0x76, 0x9f, 0xff, 0x8a, 0xd3, 0x71, 0x6d, 0x92, 0x20, 0xf8, 0xd3, 0x6a, 0xf2, 0xe7, 0x9d, - 0xbf, 0x4c, 0x9d, 0x5b, 0xa3, 0x8f, 0x5b, 0x8c, 0xae, 0x7e, 0xd3, 0xae, 0x33, 0x00, 0xfd, 0x41, - 0x9f, 0xd7, 0xce, 0xd1, 0x1f, 0xee, 0xbd, 0xf4, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfb, 0xb5, - 0x74, 0x30, 0x19, 0x3a, 0x00, 0x00, + // 3666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x3b, 0x4d, 0x6f, 0x1c, 0xc7, + 0x95, 0xea, 0xf9, 0x9e, 0x37, 0xe4, 0x70, 0x54, 0xfc, 0xd0, 0x88, 0xb6, 0x38, 0xd4, 0xc8, 0xbb, + 0x96, 0x0c, 0x7b, 0x28, 0xcb, 0xeb, 0x85, 0x3f, 0x16, 0x36, 0x38, 0x12, 0x2d, 0x89, 0x12, 0x25, + 0x7a, 0x28, 0x79, 0xb5, 0x0b, 0x03, 0xe3, 0x9e, 0xe9, 0xe2, 0xb0, 0xc5, 0x99, 0xee, 0x76, 0x7f, + 0xd0, 0x24, 0x60, 0xec, 0xda, 0x0b, 0xef, 0x9e, 0x7d, 0x59, 0x20, 0xc8, 0x25, 0xbe, 0xe4, 0xe0, + 0x20, 0x39, 0x26, 0xd7, 0x5c, 0x73, 0x08, 0x02, 0x5f, 0x02, 0x04, 0x41, 0x3c, 0x08, 0x6c, 0xe4, + 0x32, 0x87, 0xfc, 0x86, 0xa0, 0x3e, 0xba, 0xbb, 0xaa, 0xa7, 0x46, 0x24, 0x65, 0x51, 0x70, 0x9c, + 0x13, 0xd9, 0xef, 0xbb, 0xeb, 0xbd, 0x7a, 0xfd, 0xde, 0xab, 0x1a, 0x38, 0xe7, 0xec, 0xf6, 0x56, + 0x74, 0x77, 0xa0, 0x1b, 0x3a, 0xde, 0xc3, 0x96, 0xef, 0xad, 0xb0, 0x3f, 0x0d, 0xc7, 0xb5, 0x7d, + 0x1b, 0x4d, 0x89, 
0xa8, 0xc5, 0xfa, 0xee, 0x6b, 0x5e, 0xc3, 0xb4, 0x57, 0x74, 0xc7, 0x5c, 0xe9, + 0xda, 0x2e, 0x5e, 0xd9, 0x7b, 0x79, 0xa5, 0x87, 0x2d, 0xec, 0xea, 0x3e, 0x36, 0x18, 0xc7, 0xe2, + 0x45, 0x81, 0xc6, 0xc2, 0xfe, 0x47, 0xb6, 0xbb, 0x6b, 0x5a, 0x3d, 0x15, 0x65, 0xad, 0x67, 0xdb, + 0xbd, 0x3e, 0x5e, 0xa1, 0x4f, 0x9d, 0x60, 0x7b, 0xc5, 0x37, 0x07, 0xd8, 0xf3, 0xf5, 0x81, 0xc3, + 0x09, 0xfe, 0x25, 0x16, 0x35, 0xd0, 0xbb, 0x3b, 0xa6, 0x85, 0xdd, 0x83, 0x15, 0x6a, 0xaf, 0x63, + 0xae, 0xb8, 0xd8, 0xb3, 0x03, 0xb7, 0x8b, 0xc7, 0xc4, 0xbe, 0x61, 0x5a, 0x3e, 0x76, 0x2d, 0xbd, + 0xbf, 0xe2, 0x75, 0x77, 0xb0, 0x11, 0xf4, 0xb1, 0x1b, 0xff, 0x67, 0x77, 0x1e, 0xe2, 0xae, 0xef, + 0x8d, 0x01, 0x18, 0x6f, 0xfd, 0xeb, 0x79, 0x98, 0x5e, 0x23, 0xef, 0xba, 0x85, 0x3f, 0x0c, 0xb0, + 0xd5, 0xc5, 0xe8, 0x12, 0x64, 0x3f, 0x0c, 0x70, 0x80, 0xab, 0xda, 0xb2, 0x76, 0xb1, 0xd8, 0x9c, + 0x1d, 0x0d, 0x6b, 0x33, 0x14, 0xf0, 0xa2, 0x3d, 0x30, 0x7d, 0x3c, 0x70, 0xfc, 0x83, 0x16, 0xa3, + 0x40, 0x6f, 0xc0, 0xd4, 0x43, 0xbb, 0xd3, 0xf6, 0xb0, 0xdf, 0xb6, 0xf4, 0x01, 0xae, 0xa6, 0x28, + 0x47, 0x75, 0x34, 0xac, 0xcd, 0x3d, 0xb4, 0x3b, 0x5b, 0xd8, 0xbf, 0xa3, 0x0f, 0x44, 0x36, 0x88, + 0xa1, 0xe8, 0x25, 0xc8, 0x07, 0x1e, 0x76, 0xdb, 0xa6, 0x51, 0x4d, 0x53, 0xb6, 0xb9, 0xd1, 0xb0, + 0x56, 0x21, 0xa0, 0x9b, 0x86, 0xc0, 0x92, 0x63, 0x10, 0xf4, 0x22, 0xe4, 0x7a, 0xae, 0x1d, 0x38, + 0x5e, 0x35, 0xb3, 0x9c, 0x0e, 0xa9, 0x19, 0x44, 0xa4, 0x66, 0x10, 0x74, 0x17, 0x72, 0xcc, 0x81, + 0xd5, 0xec, 0x72, 0xfa, 0x62, 0xe9, 0xca, 0xf9, 0x86, 0xe8, 0xd5, 0x86, 0xf4, 0xc2, 0xec, 0x89, + 0x09, 0x64, 0x78, 0x51, 0x20, 0x8f, 0x83, 0x5f, 0xcd, 0x42, 0x96, 0xd2, 0xa1, 0x5b, 0x90, 0xef, + 0xba, 0x98, 0xac, 0x7e, 0x15, 0x2d, 0x6b, 0x17, 0x4b, 0x57, 0x16, 0x1b, 0xcc, 0xab, 0x8d, 0xd0, + 0xab, 0x8d, 0x7b, 0xa1, 0x57, 0x9b, 0xf3, 0xa3, 0x61, 0xed, 0x34, 0x27, 0x17, 0xa4, 0x86, 0x12, + 0xd0, 0x26, 0x14, 0xbd, 0xa0, 0x33, 0x30, 0xfd, 0x75, 0xbb, 0x43, 0xd7, 0xbb, 0x74, 0xe5, 0x8c, + 0x6c, 0xea, 0x56, 0x88, 0x6e, 0x9e, 0x19, 0x0d, 0x6b, 0xb3, 0x11, 0x75, 0x2c, 0xed, 0xc6, 0xa9, + 0x56, 0x2c, 0x04, 0xed, 0xc0, 0x8c, 0x8b, 0x1d, 0xd7, 0xb4, 0x5d, 0xd3, 0x37, 0x3d, 0x4c, 0xe4, + 0xa6, 0xa8, 0xdc, 0x73, 0xb2, 0xdc, 0x96, 0x4c, 0xd4, 0x3c, 0x37, 0x1a, 0xd6, 0xce, 0x26, 0x38, + 0x25, 0x1d, 0x49, 0xb1, 0xc8, 0x07, 0x94, 0x00, 0x6d, 0x61, 0x9f, 0xfa, 0xb2, 0x74, 0x65, 0xf9, + 0x91, 0xca, 0xb6, 0xb0, 0xdf, 0x5c, 0x1e, 0x0d, 0x6b, 0xcf, 0x8e, 0xf3, 0x4b, 0x2a, 0x15, 0xf2, + 0x51, 0x1f, 0x2a, 0x22, 0xd4, 0x20, 0x2f, 0x98, 0xa1, 0x3a, 0x97, 0x26, 0xeb, 0x24, 0x54, 0xcd, + 0xa5, 0xd1, 0xb0, 0xb6, 0x98, 0xe4, 0x95, 0xf4, 0x8d, 0x49, 0x26, 0xfe, 0xe9, 0xea, 0x56, 0x17, + 0xf7, 0x89, 0x9a, 0xac, 0xca, 0x3f, 0x57, 0x43, 0x34, 0xf3, 0x4f, 0x44, 0x2d, 0xfb, 0x27, 0x02, + 0xa3, 0xf7, 0x61, 0x2a, 0x7a, 0x20, 0xeb, 0x95, 0xe3, 0x31, 0xa4, 0x16, 0x4a, 0x56, 0x6a, 0x71, + 0x34, 0xac, 0x2d, 0x88, 0x3c, 0x92, 0x68, 0x49, 0x5a, 0x2c, 0xbd, 0xcf, 0x56, 0x26, 0x3f, 0x59, + 0x3a, 0xa3, 0x10, 0xa5, 0xf7, 0xc7, 0x57, 0x44, 0x92, 0x46, 0xa4, 0x93, 0x0d, 0x1c, 0x74, 0xbb, + 0x18, 0x1b, 0xd8, 0xa8, 0x16, 0x54, 0xd2, 0xd7, 0x05, 0x0a, 0x26, 0x5d, 0xe4, 0x91, 0xa5, 0x8b, + 0x18, 0xb2, 0xd6, 0x0f, 0xed, 0xce, 0x9a, 0xeb, 0xda, 0xae, 0x57, 0x2d, 0xaa, 0xd6, 0x7a, 0x3d, + 0x44, 0xb3, 0xb5, 0x8e, 0xa8, 0xe5, 0xb5, 0x8e, 0xc0, 0xdc, 0xde, 0x56, 0x60, 0xdd, 0xc6, 0xba, + 0x87, 0x8d, 0x2a, 0x4c, 0xb0, 0x37, 0xa2, 0x88, 0xec, 0x8d, 0x20, 0x63, 0xf6, 0x46, 0x18, 0x64, + 0x40, 0x99, 0x3d, 0xaf, 0x7a, 0x9e, 0xd9, 0xb3, 0xb0, 0x51, 0x2d, 0x51, 0xf9, 0xcf, 0xaa, 0xe4, + 0x87, 0x34, 0xcd, 0x67, 0x47, 0xc3, 0x5a, 
0x55, 0xe6, 0x93, 0x74, 0x24, 0x64, 0xa2, 0x0f, 0x60, + 0x9a, 0x41, 0x5a, 0x81, 0x65, 0x99, 0x56, 0xaf, 0x3a, 0x45, 0x95, 0x3c, 0xa3, 0x52, 0xc2, 0x49, + 0x9a, 0xcf, 0x8c, 0x86, 0xb5, 0x33, 0x12, 0x97, 0xa4, 0x42, 0x16, 0x48, 0x32, 0x06, 0x03, 0xc4, + 0x8e, 0x9d, 0x56, 0x65, 0x8c, 0x75, 0x99, 0x88, 0x65, 0x8c, 0x04, 0xa7, 0x9c, 0x31, 0x12, 0xc8, + 0xd8, 0x1f, 0xdc, 0xc9, 0xe5, 0xc9, 0xfe, 0xe0, 0x7e, 0x16, 0xfc, 0xa1, 0x70, 0xb5, 0x24, 0x0d, + 0x7d, 0xa2, 0xc1, 0xbc, 0xe7, 0xeb, 0x96, 0xa1, 0xf7, 0x6d, 0x0b, 0xdf, 0xb4, 0x7a, 0x2e, 0xf6, + 0xbc, 0x9b, 0xd6, 0xb6, 0x5d, 0xad, 0x50, 0x3d, 0x17, 0x12, 0x89, 0x55, 0x45, 0xda, 0xbc, 0x30, + 0x1a, 0xd6, 0x6a, 0x4a, 0x29, 0x92, 0x66, 0xb5, 0x22, 0xb4, 0x0f, 0xb3, 0xe1, 0x47, 0xfa, 0xbe, + 0x6f, 0xf6, 0x4d, 0x4f, 0xf7, 0x4d, 0xdb, 0xaa, 0x9e, 0xa6, 0xfa, 0xcf, 0x27, 0xf3, 0xd3, 0x18, + 0x61, 0xf3, 0xfc, 0x68, 0x58, 0x3b, 0xa7, 0x90, 0x20, 0xe9, 0x56, 0xa9, 0x88, 0x9d, 0xb8, 0xe9, + 0x62, 0x42, 0x88, 0x8d, 0xea, 0xec, 0x64, 0x27, 0x46, 0x44, 0xa2, 0x13, 0x23, 0xa0, 0xca, 0x89, + 0x11, 0x92, 0x68, 0x72, 0x74, 0xd7, 0x37, 0x89, 0xda, 0x0d, 0xdd, 0xdd, 0xc5, 0x6e, 0x75, 0x4e, + 0xa5, 0x69, 0x53, 0x26, 0x62, 0x9a, 0x12, 0x9c, 0xb2, 0xa6, 0x04, 0x12, 0x7d, 0xae, 0x81, 0x6c, + 0x9a, 0x69, 0x5b, 0x2d, 0xf2, 0xd1, 0xf6, 0xc8, 0xeb, 0xcd, 0x53, 0xa5, 0xcf, 0x3f, 0xe2, 0xf5, + 0x44, 0xf2, 0xe6, 0xf3, 0xa3, 0x61, 0xed, 0xc2, 0x44, 0x69, 0x92, 0x21, 0x93, 0x95, 0xa2, 0x07, + 0x50, 0x22, 0x48, 0x4c, 0xcb, 0x1f, 0xa3, 0xba, 0x40, 0x6d, 0x38, 0x3b, 0x6e, 0x03, 0x27, 0x68, + 0x9e, 0x1d, 0x0d, 0x6b, 0xf3, 0x02, 0x87, 0xa4, 0x47, 0x14, 0x85, 0x3e, 0xd3, 0x80, 0x04, 0xba, + 0xea, 0x4d, 0xcf, 0x50, 0x2d, 0xcf, 0x8d, 0x69, 0x51, 0xbd, 0xe6, 0x73, 0xa3, 0x61, 0x6d, 0x59, + 0x2d, 0x47, 0xd2, 0x3d, 0x41, 0x57, 0x1c, 0x47, 0xd1, 0x47, 0xa2, 0x5a, 0x9d, 0x1c, 0x47, 0x11, + 0x91, 0x18, 0x47, 0x11, 0x50, 0x15, 0x47, 0x11, 0x92, 0x27, 0x83, 0xf7, 0xf4, 0xbe, 0x69, 0xd0, + 0x62, 0xea, 0xec, 0x84, 0x64, 0x10, 0x51, 0x44, 0xc9, 0x20, 0x82, 0x8c, 0x25, 0x83, 0x98, 0x36, + 0x0f, 0x59, 0x2a, 0xa2, 0xfe, 0xc7, 0x1c, 0xcc, 0x2a, 0xb6, 0x1a, 0xc2, 0x30, 0x1d, 0xee, 0xa3, + 0xb6, 0x49, 0x92, 0x44, 0x5a, 0xb5, 0xca, 0xb7, 0x82, 0x0e, 0x76, 0x2d, 0xec, 0x63, 0x2f, 0x94, + 0x41, 0xb3, 0x04, 0xb5, 0xc4, 0x15, 0x20, 0x42, 0x6d, 0x37, 0x25, 0xc2, 0xd1, 0x8f, 0x35, 0xa8, + 0x0e, 0xf4, 0xfd, 0x76, 0x08, 0xf4, 0xda, 0xdb, 0xb6, 0xdb, 0x76, 0xb0, 0x6b, 0xda, 0x06, 0xad, + 0x64, 0x4b, 0x57, 0xfe, 0xed, 0xd0, 0xbc, 0xd0, 0xd8, 0xd0, 0xf7, 0x43, 0xb0, 0xf7, 0x8e, 0xed, + 0x6e, 0x52, 0xf6, 0x35, 0xcb, 0x77, 0x0f, 0x58, 0xc2, 0x1a, 0xa8, 0xf0, 0x82, 0x4d, 0xf3, 0x4a, + 0x02, 0xf4, 0xff, 0x1a, 0x2c, 0xf8, 0xb6, 0xaf, 0xf7, 0xdb, 0xdd, 0x60, 0x10, 0xf4, 0x75, 0xdf, + 0xdc, 0xc3, 0xed, 0xc0, 0xd3, 0x7b, 0x98, 0x97, 0xcd, 0x6f, 0x1e, 0x6e, 0xda, 0x3d, 0xc2, 0x7f, + 0x35, 0x62, 0xbf, 0x4f, 0xb8, 0x99, 0x65, 0xf5, 0xd1, 0xb0, 0xb6, 0xe4, 0x2b, 0xd0, 0x82, 0x61, + 0x73, 0x2a, 0x3c, 0x7a, 0x01, 0x72, 0xa4, 0xad, 0x30, 0x0d, 0x5a, 0x1d, 0xf1, 0x16, 0xe4, 0xa1, + 0xdd, 0x91, 0x1a, 0x83, 0x2c, 0x05, 0x10, 0x5a, 0x37, 0xb0, 0x08, 0x6d, 0x3e, 0xa6, 0x75, 0x03, + 0x4b, 0xa6, 0xa5, 0x80, 0xc5, 0x2f, 0x34, 0x58, 0x9c, 0xbc, 0x94, 0xe8, 0x02, 0xa4, 0x77, 0xf1, + 0x01, 0x6f, 0x7b, 0x4e, 0x8f, 0x86, 0xb5, 0xe9, 0x5d, 0x7c, 0x20, 0x48, 0x21, 0x58, 0xf4, 0x1f, + 0x90, 0xdd, 0xd3, 0xfb, 0x01, 0xe6, 0x55, 0x75, 0xa3, 0xc1, 0x3a, 0xb6, 0x86, 0xd8, 0xb1, 0x35, + 0x9c, 0xdd, 0x1e, 0x01, 0x34, 0x42, 0xaf, 0x37, 0xde, 0x0d, 0x74, 0xcb, 0x37, 0xfd, 0x03, 0x66, + 0x1e, 0x15, 0x20, 0x9a, 0x47, 0x01, 0x6f, 0xa4, 0x5e, 0xd3, 0x16, 
0x7f, 0xa2, 0xc1, 0xd9, 0x89, + 0x4b, 0xfa, 0x7d, 0xb0, 0x70, 0x3d, 0x53, 0xd0, 0x2a, 0xa9, 0xf5, 0x4c, 0x21, 0x55, 0x49, 0xd7, + 0x7f, 0x9e, 0x87, 0x62, 0xd4, 0xa0, 0xa0, 0x1b, 0x50, 0x31, 0xb0, 0x11, 0x38, 0x7d, 0xb3, 0x4b, + 0x63, 0x83, 0x38, 0x85, 0x75, 0x84, 0x34, 0x3b, 0x48, 0x38, 0xc9, 0x3d, 0x33, 0x09, 0x14, 0xba, + 0x02, 0x05, 0x5e, 0x88, 0x1f, 0xd0, 0x7d, 0x39, 0xdd, 0x5c, 0x18, 0x0d, 0x6b, 0x28, 0x84, 0x09, + 0xac, 0x11, 0x1d, 0x6a, 0x01, 0xb0, 0xce, 0x76, 0x03, 0xfb, 0x3a, 0x6f, 0x09, 0xaa, 0x72, 0xfc, + 0xde, 0x8d, 0xf0, 0xac, 0x47, 0x8d, 0xe9, 0xc5, 0x1e, 0x35, 0x86, 0xa2, 0xf7, 0x01, 0x06, 0xba, + 0x69, 0x31, 0x3e, 0x5e, 0xff, 0xd7, 0x27, 0x65, 0x88, 0x8d, 0x88, 0x92, 0x49, 0x8f, 0x39, 0x45, + 0xe9, 0x31, 0x14, 0xdd, 0x85, 0x3c, 0xef, 0xc5, 0xab, 0x39, 0xba, 0xdd, 0x96, 0x26, 0x89, 0xe6, + 0x62, 0x69, 0x37, 0xc9, 0x59, 0xc4, 0x6e, 0x92, 0x83, 0xc8, 0xb2, 0xf5, 0xcd, 0x6d, 0xec, 0x9b, + 0x03, 0x4c, 0x77, 0x03, 0x5f, 0xb6, 0x10, 0x26, 0x2e, 0x5b, 0x08, 0x43, 0xaf, 0x01, 0xe8, 0xfe, + 0x86, 0xed, 0xf9, 0x77, 0xad, 0x2e, 0xa6, 0x15, 0x7d, 0x81, 0x99, 0x1f, 0x43, 0x45, 0xf3, 0x63, + 0x28, 0x7a, 0x13, 0x4a, 0x0e, 0xff, 0x82, 0x74, 0xfa, 0x98, 0x56, 0xec, 0x05, 0xf6, 0xc1, 0x13, + 0xc0, 0x02, 0xaf, 0x48, 0x8d, 0xae, 0xc3, 0x4c, 0xd7, 0xb6, 0xba, 0x81, 0xeb, 0x62, 0xab, 0x7b, + 0xb0, 0xa5, 0x6f, 0x63, 0x5a, 0x9d, 0x17, 0x58, 0xa8, 0x24, 0x50, 0x62, 0xa8, 0x24, 0x50, 0xe8, + 0x55, 0x28, 0x46, 0x93, 0x0d, 0x5a, 0x80, 0x17, 0x79, 0xa3, 0x1c, 0x02, 0x05, 0xe6, 0x98, 0x92, + 0x18, 0x6f, 0x7a, 0xd7, 0x78, 0xd0, 0x61, 0x5a, 0x54, 0x73, 0xe3, 0x05, 0xb0, 0x68, 0xbc, 0x00, + 0x16, 0xf2, 0x53, 0xf9, 0xd0, 0xfc, 0xf4, 0x5f, 0x30, 0x8f, 0xf7, 0x49, 0xbe, 0x1f, 0x60, 0xcb, + 0xd7, 0xfb, 0x9b, 0xae, 0xc9, 0xbe, 0x0c, 0xd5, 0x19, 0x55, 0x51, 0xba, 0xa6, 0x22, 0x65, 0x39, + 0x5e, 0x29, 0x45, 0xcc, 0xf1, 0x4a, 0x82, 0x68, 0xbb, 0x4e, 0x57, 0xca, 0xf5, 0x5b, 0x30, 0xaf, + 0x54, 0x40, 0x02, 0xa7, 0x63, 0x1a, 0xf4, 0x99, 0x26, 0x17, 0x8d, 0x05, 0x4e, 0x08, 0x13, 0x03, + 0x27, 0x84, 0xd5, 0x7f, 0xab, 0xc1, 0x9c, 0x2a, 0xf8, 0x13, 0x1b, 0x51, 0x7b, 0x22, 0x1b, 0xf1, + 0x3d, 0x28, 0x38, 0xb6, 0xd1, 0xf6, 0x1c, 0xdc, 0xe5, 0x69, 0x2d, 0xb1, 0x0d, 0x37, 0x6d, 0x63, + 0xcb, 0xc1, 0xdd, 0x7f, 0x37, 0xfd, 0x9d, 0xd5, 0x3d, 0xdb, 0x34, 0x6e, 0x9b, 0x1e, 0xdf, 0x2f, + 0x0e, 0xc3, 0x48, 0xb5, 0x42, 0x9e, 0x03, 0x9b, 0x05, 0xc8, 0x31, 0x2d, 0xf5, 0xdf, 0xa5, 0xa1, + 0x92, 0xdc, 0x70, 0x7f, 0x4f, 0xaf, 0x82, 0x1e, 0x40, 0xde, 0x64, 0xad, 0x08, 0x2f, 0x65, 0xfe, + 0x49, 0x48, 0xfc, 0x8d, 0x78, 0x2e, 0xd9, 0xd8, 0x7b, 0xb9, 0xc1, 0x7b, 0x16, 0xba, 0x04, 0x54, + 0x32, 0xe7, 0x94, 0x25, 0x73, 0x20, 0x6a, 0x41, 0xde, 0xc3, 0xee, 0x1e, 0x09, 0x0e, 0x96, 0x56, + 0x6b, 0xa2, 0xe4, 0xae, 0xed, 0x62, 0x22, 0x73, 0x8b, 0x91, 0xc4, 0x32, 0x39, 0x8f, 0x2c, 0x93, + 0x03, 0xd1, 0x7b, 0x50, 0xec, 0xda, 0xd6, 0xb6, 0xd9, 0xdb, 0xd0, 0x1d, 0x9e, 0x58, 0xcf, 0xa9, + 0xa4, 0x5e, 0x0d, 0x89, 0xf8, 0x78, 0x25, 0x7c, 0x4c, 0x8c, 0x57, 0x22, 0xaa, 0xd8, 0xa1, 0x7f, + 0xcd, 0x00, 0xc4, 0xce, 0x41, 0xaf, 0x43, 0x09, 0xef, 0xe3, 0x6e, 0xe0, 0xdb, 0x74, 0xe4, 0xa8, + 0xc5, 0x93, 0xca, 0x10, 0x2c, 0xed, 0x5e, 0x88, 0xa1, 0x24, 0xc5, 0x58, 0xfa, 0x00, 0x7b, 0x8e, + 0xde, 0x0d, 0x47, 0x9c, 0xd4, 0x98, 0x08, 0x28, 0xa6, 0x98, 0x08, 0x88, 0xfe, 0x19, 0x32, 0x74, + 0x28, 0xca, 0xa6, 0x9b, 0x68, 0x34, 0xac, 0x95, 0x2d, 0x79, 0x1c, 0x4a, 0xf1, 0xe8, 0x6d, 0x98, + 0xde, 0x8d, 0x02, 0x8f, 0xd8, 0x96, 0xa1, 0x0c, 0xb4, 0xc6, 0x8c, 0x11, 0x92, 0x75, 0x53, 0x22, + 0x1c, 0x6d, 0x43, 0x49, 0xb7, 0x2c, 0xdb, 0xa7, 0x5f, 0xcf, 0x70, 0xe2, 0x79, 0x69, 0x52, 
0x98, + 0x36, 0x56, 0x63, 0x5a, 0x56, 0xa8, 0xd1, 0xb4, 0x27, 0x48, 0x10, 0xd3, 0x9e, 0x00, 0x46, 0x2d, + 0xc8, 0xf5, 0xf5, 0x0e, 0xee, 0x87, 0x9f, 0xab, 0xe7, 0x26, 0xaa, 0xb8, 0x4d, 0xc9, 0x98, 0x74, + 0x3a, 0x57, 0x65, 0x7c, 0xe2, 0x5c, 0x95, 0x41, 0x16, 0xb7, 0xa1, 0x92, 0xb4, 0xe7, 0x68, 0x55, + 0xce, 0x25, 0xb1, 0xca, 0x29, 0x1e, 0x5a, 0x57, 0xe9, 0x50, 0x12, 0x8c, 0x3a, 0x09, 0x15, 0xf5, + 0x2f, 0x35, 0x98, 0x53, 0xed, 0x5d, 0xb4, 0x21, 0xec, 0x78, 0x8d, 0x4f, 0x6f, 0x14, 0xa1, 0xce, + 0x79, 0x27, 0x6c, 0xf5, 0x78, 0xa3, 0x37, 0xa1, 0x6c, 0xd9, 0x06, 0x6e, 0xeb, 0x44, 0x41, 0xdf, + 0xf4, 0xfc, 0x6a, 0x8a, 0x4e, 0xc4, 0xe9, 0xd4, 0x87, 0x60, 0x56, 0x43, 0x84, 0xc0, 0x3d, 0x2d, + 0x21, 0xea, 0x1f, 0xc1, 0x4c, 0x62, 0x26, 0x2b, 0xd5, 0x5c, 0xa9, 0x23, 0xd6, 0x5c, 0xf1, 0x87, + 0x30, 0x7d, 0xd8, 0x87, 0x90, 0x7d, 0x88, 0xea, 0xff, 0x9b, 0x82, 0x92, 0xd0, 0x20, 0xa3, 0x87, + 0x30, 0xc3, 0x3f, 0xca, 0xa6, 0xd5, 0x63, 0x8d, 0x58, 0x8a, 0x7f, 0x18, 0xc7, 0x0e, 0x2c, 0xd6, + 0xed, 0xce, 0x56, 0x44, 0x4b, 0x3f, 0x8c, 0x74, 0x98, 0xe6, 0x49, 0x30, 0x41, 0x71, 0x59, 0xc6, + 0xa0, 0x07, 0xb0, 0x10, 0x38, 0xa4, 0x3d, 0x6c, 0x7b, 0x7c, 0xf4, 0xdf, 0xb6, 0x82, 0x41, 0x07, + 0xbb, 0xd4, 0xfa, 0x2c, 0x6b, 0x58, 0x18, 0x45, 0x78, 0x36, 0x70, 0x87, 0xe2, 0xc5, 0x86, 0x45, + 0x85, 0x17, 0xd6, 0x21, 0x73, 0xc4, 0x75, 0xb8, 0x01, 0x68, 0x7c, 0x28, 0x2e, 0xf9, 0x40, 0x3b, + 0x9a, 0x0f, 0xea, 0xfb, 0x50, 0x49, 0x8e, 0xba, 0x9f, 0x92, 0x2f, 0x77, 0xa1, 0x18, 0x0d, 0xaa, + 0xd1, 0x8b, 0x90, 0x73, 0xb1, 0xee, 0xd9, 0x16, 0xdf, 0x2d, 0x74, 0xdb, 0x33, 0x88, 0xb8, 0xed, + 0x19, 0xe4, 0x31, 0x94, 0xdd, 0x83, 0x29, 0xb6, 0x48, 0xef, 0x98, 0x7d, 0x1f, 0xbb, 0xe8, 0x1a, + 0xe4, 0x3c, 0x5f, 0xf7, 0xb1, 0x57, 0xd5, 0x96, 0xd3, 0x17, 0xcb, 0x57, 0x16, 0xc6, 0xa7, 0xd0, + 0x04, 0xcd, 0xec, 0x60, 0x94, 0xa2, 0x1d, 0x0c, 0x52, 0xff, 0x1f, 0x0d, 0xa6, 0xc4, 0x61, 0xfb, + 0x93, 0x11, 0x7b, 0xbc, 0xc5, 0xa8, 0x5b, 0xa1, 0x0d, 0x7c, 0xcc, 0x7e, 0xd2, 0x4b, 0xf9, 0xa5, + 0xc6, 0xd6, 0x32, 0x9a, 0xcb, 0xf6, 0xe2, 0x59, 0x08, 0xd9, 0x28, 0x1e, 0x4d, 0x28, 0x47, 0x9d, + 0x85, 0xd0, 0xb4, 0x23, 0xb1, 0x8b, 0x69, 0x47, 0x42, 0x3c, 0x86, 0xad, 0x5f, 0x64, 0xa9, 0xad, + 0xf1, 0xd4, 0x3d, 0xf1, 0x1d, 0x4f, 0x1f, 0xe3, 0x3b, 0xfe, 0x12, 0xe4, 0x69, 0xe2, 0x8c, 0xb6, + 0x29, 0x5d, 0x58, 0x02, 0x92, 0x4f, 0x1c, 0x19, 0xe4, 0x11, 0xe9, 0x22, 0xfb, 0x1d, 0xd3, 0x45, + 0x1b, 0xce, 0xee, 0xe8, 0x5e, 0x3b, 0x4c, 0x70, 0x46, 0x5b, 0xf7, 0xdb, 0xd1, 0x7e, 0xcd, 0xd1, + 0x56, 0x84, 0xce, 0xf1, 0x76, 0x74, 0x6f, 0x2b, 0xa4, 0x59, 0xf5, 0x37, 0xc7, 0x77, 0xef, 0x82, + 0x9a, 0x02, 0xdd, 0x87, 0x79, 0xb5, 0xf0, 0x3c, 0xb5, 0x9c, 0x8e, 0x99, 0xbd, 0x47, 0x4a, 0x9e, + 0x55, 0xa0, 0xd1, 0xa7, 0x1a, 0x54, 0xc9, 0x97, 0xcc, 0xc5, 0x1f, 0x06, 0xa6, 0x8b, 0x49, 0x17, + 0xe1, 0xb5, 0xed, 0x3d, 0xec, 0xf6, 0xf5, 0x03, 0x7e, 0x62, 0x73, 0x7e, 0x3c, 0x6d, 0x6f, 0xda, + 0x46, 0x4b, 0x60, 0x60, 0xaf, 0xe6, 0xc8, 0xc0, 0xbb, 0x4c, 0x88, 0xf8, 0x6a, 0x6a, 0x0a, 0x21, + 0x84, 0xe0, 0x18, 0xb3, 0xa1, 0xd2, 0x61, 0xb3, 0x21, 0x52, 0xad, 0x39, 0xb6, 0xdd, 0xa7, 0x9d, + 0x20, 0xaf, 0xd6, 0xc8, 0xb3, 0x58, 0xad, 0x91, 0x67, 0x71, 0xfc, 0xb1, 0x9e, 0x29, 0x14, 0x2a, + 0xc5, 0xfa, 0xd7, 0x1a, 0x94, 0xe5, 0x43, 0x9e, 0xf1, 0x0d, 0x95, 0x3e, 0xf1, 0x0d, 0x95, 0x39, + 0xc6, 0x6a, 0x64, 0x0f, 0x5b, 0x0d, 0x69, 0xc8, 0xf3, 0x27, 0x0d, 0xa6, 0xa5, 0xf3, 0xa5, 0x1f, + 0xd6, 0xeb, 0xfd, 0x28, 0x05, 0x0b, 0x6a, 0x53, 0x4f, 0xa4, 0xfd, 0xbb, 0x01, 0xa4, 0x90, 0xbb, + 0x19, 0x17, 0x3a, 0xf3, 0x63, 0xdd, 0x1f, 0x5d, 0xa6, 0xb0, 0x0a, 0x1c, 0x3b, 0x7a, 0x0a, 0xd9, + 0xd1, 0x03, 0x28, 
0x99, 0xc2, 0x21, 0x57, 0x5a, 0x75, 0x16, 0x21, 0x1e, 0x6d, 0xb1, 0xe9, 0xc6, + 0x84, 0x03, 0x2d, 0x51, 0x54, 0x33, 0x07, 0x19, 0x52, 0x89, 0xd5, 0xf7, 0x20, 0xcf, 0xcd, 0x41, + 0xaf, 0x40, 0x91, 0xe6, 0x4e, 0xda, 0xd1, 0xb0, 0xb2, 0x99, 0x96, 0x14, 0x04, 0x98, 0xb8, 0xe4, + 0x51, 0x08, 0x61, 0xe8, 0x5f, 0x01, 0x48, 0xba, 0xe0, 0x59, 0x33, 0x45, 0x73, 0x0f, 0xed, 0x9c, + 0x1c, 0xdb, 0x18, 0x4b, 0x95, 0xc5, 0x08, 0x58, 0xff, 0x45, 0x0a, 0x4a, 0xe2, 0xb1, 0xda, 0x63, + 0x29, 0xff, 0x18, 0xc2, 0xae, 0xb6, 0xad, 0x1b, 0x06, 0xf9, 0x8b, 0xc3, 0x0f, 0xdb, 0xca, 0xc4, + 0x45, 0x0a, 0xff, 0x5f, 0x0d, 0x39, 0x58, 0x0f, 0x43, 0xaf, 0x0e, 0x98, 0x09, 0x94, 0xa0, 0xb5, + 0x92, 0xc4, 0x2d, 0xee, 0xc2, 0xbc, 0x52, 0x94, 0xd8, 0x79, 0x64, 0x9f, 0x54, 0xe7, 0xf1, 0xd3, + 0x2c, 0xcc, 0x2b, 0x8f, 0x33, 0x13, 0x11, 0x9c, 0x7e, 0x22, 0x11, 0xfc, 0x7f, 0x9a, 0x6a, 0x65, + 0xd9, 0x59, 0xc6, 0xeb, 0x47, 0x38, 0x63, 0x7d, 0x52, 0x6b, 0x2c, 0x87, 0x45, 0xf6, 0xb1, 0x62, + 0x32, 0x77, 0xd4, 0x98, 0x44, 0x97, 0x59, 0x13, 0x47, 0x75, 0xb1, 0x93, 0x86, 0x70, 0x87, 0x26, + 0x54, 0xe5, 0x39, 0x88, 0xf4, 0xf5, 0x21, 0x07, 0x1b, 0x1d, 0x14, 0xe2, 0xbe, 0x9e, 0xd3, 0x24, + 0xa7, 0x07, 0x53, 0x22, 0x5c, 0xc8, 0x7e, 0xc5, 0x63, 0x64, 0x3f, 0x38, 0xf4, 0x18, 0xe4, 0x69, + 0xc6, 0xa6, 0x94, 0x6a, 0x87, 0x1a, 0xcc, 0x24, 0x6e, 0x11, 0xfc, 0xb0, 0xbe, 0x25, 0x9f, 0x68, + 0x50, 0x8c, 0x2e, 0xa9, 0xa0, 0x55, 0xc8, 0x61, 0x76, 0xd1, 0x81, 0xa5, 0x9d, 0xd9, 0xc4, 0xac, + 0x97, 0xe0, 0xf8, 0xb5, 0xb3, 0xc4, 0xdd, 0x86, 0x16, 0x67, 0x7c, 0x8c, 0x82, 0xf9, 0x97, 0x5a, + 0x58, 0x30, 0x8f, 0x59, 0x91, 0xfe, 0xee, 0x56, 0x9c, 0xdc, 0xd2, 0xfd, 0xba, 0x08, 0x59, 0x6a, + 0x0b, 0x69, 0x5e, 0x7d, 0xec, 0x0e, 0x4c, 0x4b, 0xef, 0xd3, 0x50, 0x2c, 0xb0, 0x5d, 0x1d, 0xc2, + 0xc4, 0x5d, 0x1d, 0xc2, 0xd0, 0x0e, 0xcc, 0xc4, 0x23, 0x31, 0x2a, 0x46, 0x7d, 0xeb, 0xed, 0x96, + 0x4c, 0xc4, 0x4e, 0x1b, 0x12, 0x9c, 0xf2, 0xb1, 0x75, 0x02, 0x89, 0x0c, 0x28, 0x77, 0x6d, 0xcb, + 0xd7, 0x4d, 0x0b, 0xbb, 0x4c, 0x51, 0x5a, 0x75, 0xeb, 0xe7, 0xaa, 0x44, 0xc3, 0x06, 0x15, 0x32, + 0x9f, 0x7c, 0xeb, 0x47, 0xc6, 0xa1, 0x0f, 0x60, 0x3a, 0x6c, 0x5c, 0x98, 0x92, 0x8c, 0xea, 0xd6, + 0xcf, 0x9a, 0x48, 0xc2, 0x36, 0x83, 0xc4, 0x25, 0xdf, 0xfa, 0x91, 0x50, 0xa8, 0x0f, 0x15, 0xc7, + 0x36, 0xee, 0x5b, 0xbc, 0x5c, 0xd7, 0x3b, 0x7d, 0xcc, 0xe7, 0xb0, 0x4b, 0x63, 0x05, 0x89, 0x44, + 0xc5, 0x12, 0x75, 0x92, 0x57, 0xbe, 0x47, 0x97, 0xc4, 0xa2, 0xf7, 0x61, 0xaa, 0x4f, 0xfa, 0xb7, + 0xb5, 0x7d, 0xc7, 0x74, 0xb1, 0xa1, 0xbe, 0xf5, 0x76, 0x5b, 0xa0, 0x60, 0x69, 0x52, 0xe4, 0x91, + 0x0f, 0xfb, 0x45, 0x0c, 0xf1, 0xfe, 0x40, 0xdf, 0x6f, 0x05, 0x96, 0xb7, 0xb6, 0xcf, 0x6f, 0x30, + 0xe5, 0x55, 0xde, 0xdf, 0x90, 0x89, 0x98, 0xf7, 0x13, 0x9c, 0xb2, 0xf7, 0x13, 0x48, 0x74, 0x9b, + 0x7e, 0x05, 0x98, 0x4b, 0xd8, 0xed, 0xb7, 0x85, 0xb1, 0xd5, 0x62, 0xde, 0x60, 0x03, 0x17, 0xfe, + 0x24, 0x09, 0x8d, 0x24, 0x70, 0x1f, 0xd0, 0xd7, 0x6e, 0x61, 0x3f, 0x70, 0x2d, 0x6c, 0xf0, 0x36, + 0x6a, 0xdc, 0x07, 0x12, 0x55, 0xe4, 0x03, 0x09, 0x3a, 0xe6, 0x03, 0x09, 0x8b, 0x3e, 0x86, 0xb9, + 0xc4, 0x5d, 0x1e, 0xf6, 0x1e, 0x25, 0xd5, 0x21, 0xc4, 0xba, 0x82, 0x92, 0x75, 0xbc, 0x2a, 0x19, + 0x92, 0x66, 0xa5, 0x16, 0xa2, 0xbd, 0xa7, 0x5b, 0xbd, 0x75, 0xbb, 0x23, 0xc7, 0xdc, 0x94, 0x4a, + 0xfb, 0x75, 0x05, 0x25, 0xd3, 0xae, 0x92, 0x21, 0x6b, 0x57, 0x51, 0x44, 0xf7, 0x76, 0x48, 0x11, + 0x13, 0xdd, 0x6f, 0x53, 0xdd, 0xdb, 0x61, 0x04, 0xc2, 0xbd, 0x1d, 0x06, 0x50, 0xdc, 0xdb, 0xe1, + 0x94, 0x85, 0x70, 0x58, 0x53, 0x7f, 0x17, 0x66, 0x12, 0xe9, 0x05, 0xbd, 0x05, 0xd1, 0x6d, 0x90, + 0x7b, 0x07, 0x4e, 0x58, 0xbb, 0x4a, 0xb7, 
0x47, 0x08, 0x5c, 0x75, 0x7b, 0x84, 0xc0, 0xeb, 0x9f, + 0x67, 0xa0, 0x10, 0x46, 0xd4, 0x89, 0x74, 0x23, 0x2b, 0x90, 0x1f, 0x60, 0x8f, 0xde, 0xf8, 0x48, + 0xc5, 0x45, 0x0d, 0x07, 0x89, 0x45, 0x0d, 0x07, 0xc9, 0x35, 0x57, 0xfa, 0xb1, 0x6a, 0xae, 0xcc, + 0x91, 0x6b, 0x2e, 0x4c, 0x0f, 0x89, 0x85, 0xbc, 0x18, 0x1e, 0x6e, 0x3c, 0x3a, 0xd9, 0x86, 0x47, + 0xc8, 0x22, 0x63, 0xe2, 0x08, 0x59, 0x44, 0xa1, 0x5d, 0x38, 0x2d, 0x1c, 0xc0, 0xf0, 0xd1, 0x1b, + 0xc9, 0x50, 0xe5, 0xc9, 0x27, 0xf2, 0x2d, 0x4a, 0xc5, 0xf6, 0xe1, 0x6e, 0x02, 0x2a, 0x16, 0xad, + 0x49, 0x1c, 0x09, 0x09, 0x03, 0x77, 0x82, 0xde, 0x06, 0x5f, 0xf6, 0x7c, 0x1c, 0x12, 0x22, 0x5c, + 0x0c, 0x09, 0x11, 0x5e, 0xff, 0x4b, 0x0a, 0xca, 0xf2, 0xfb, 0x9e, 0x48, 0x60, 0xbc, 0x02, 0x45, + 0xbc, 0x6f, 0xfa, 0xed, 0xae, 0x6d, 0x60, 0xde, 0xb9, 0x51, 0x3f, 0x13, 0xe0, 0x55, 0xdb, 0x90, + 0xfc, 0x1c, 0xc2, 0xc4, 0x68, 0x4a, 0x1f, 0x29, 0x9a, 0xe2, 0x49, 0x67, 0xe6, 0x08, 0x93, 0x4e, + 0xa5, 0x9f, 0x8a, 0x27, 0xe3, 0xa7, 0xfa, 0x57, 0x29, 0xa8, 0x24, 0xd3, 0xee, 0xf7, 0x63, 0x0b, + 0xca, 0xbb, 0x29, 0x7d, 0xe4, 0xdd, 0xf4, 0x36, 0x4c, 0x93, 0xca, 0x4c, 0xf7, 0x7d, 0x7e, 0x41, + 0x34, 0x43, 0x8b, 0x2b, 0x96, 0x8d, 0x02, 0x6b, 0x35, 0x84, 0x4b, 0xd9, 0x48, 0x80, 0x8f, 0x85, + 0x6e, 0xf6, 0x98, 0xa1, 0xfb, 0x69, 0x0a, 0xa6, 0x37, 0x6d, 0xe3, 0x1e, 0x2b, 0xda, 0xfc, 0xef, + 0xcb, 0x7a, 0x3e, 0xcd, 0x94, 0x56, 0x9f, 0x81, 0x69, 0xa9, 0x6a, 0xab, 0x7f, 0xc6, 0xe2, 0x4c, + 0xfe, 0x5c, 0xfd, 0xe3, 0xad, 0x4b, 0x19, 0xa6, 0xc4, 0xf2, 0xaf, 0xde, 0x84, 0x99, 0x44, 0xb5, + 0x26, 0xbe, 0x80, 0x76, 0x94, 0x17, 0xa8, 0x5f, 0x83, 0x39, 0x55, 0x19, 0x23, 0x64, 0x1d, 0xed, + 0x08, 0xa7, 0x33, 0xd7, 0x61, 0x4e, 0x55, 0x8e, 0x1c, 0xdf, 0x9c, 0xb7, 0xf8, 0xc9, 0x27, 0x2b, + 0x1c, 0x8e, 0xcf, 0xff, 0xfb, 0xa8, 0x7b, 0x8e, 0x2f, 0x63, 0xbf, 0x03, 0x15, 0x27, 0x7c, 0x68, + 0xf3, 0x1e, 0x8d, 0x6d, 0x4b, 0xda, 0x71, 0x44, 0xb8, 0xf5, 0x44, 0xb3, 0x56, 0x96, 0x31, 0xb2, + 0x1c, 0xde, 0xbf, 0xe5, 0x14, 0x72, 0x5a, 0x89, 0x46, 0xae, 0x2c, 0x63, 0x84, 0xa5, 0xcd, 0x1f, + 0xbe, 0xb4, 0xb4, 0xff, 0xcb, 0x92, 0xa6, 0x79, 0x26, 0x71, 0x59, 0x1c, 0x5d, 0x86, 0x02, 0xfd, + 0x25, 0x57, 0xdc, 0xf9, 0xd2, 0xd5, 0xa1, 0x30, 0xc9, 0x80, 0x3c, 0x07, 0xa1, 0x57, 0xa1, 0x18, + 0xdd, 0x1f, 0xe7, 0x67, 0x9e, 0x2c, 0xee, 0x42, 0xa0, 0x14, 0x77, 0x21, 0x90, 0x37, 0xcd, 0xff, + 0x0d, 0x67, 0x27, 0xde, 0x1c, 0x3f, 0x4e, 0x0f, 0x2e, 0x74, 0xbf, 0x99, 0x63, 0x75, 0xbf, 0xfb, + 0xb0, 0xa0, 0xbe, 0xd0, 0x2d, 0x68, 0x4f, 0x1d, 0xaa, 0x3d, 0x5e, 0xfd, 0xf4, 0x11, 0x57, 0x3f, + 0x55, 0xdf, 0xa5, 0xe3, 0x82, 0xe8, 0xe2, 0x34, 0xba, 0x04, 0x59, 0xc7, 0xb6, 0xfb, 0x1e, 0xbf, + 0x54, 0x40, 0xd5, 0x51, 0x80, 0xa8, 0x8e, 0x02, 0x1e, 0x63, 0x38, 0x11, 0x84, 0x11, 0x1c, 0x5f, + 0x03, 0x7f, 0x0a, 0xab, 0xfb, 0xc2, 0x65, 0x28, 0x84, 0x07, 0xb7, 0x08, 0x20, 0xf7, 0xee, 0xfd, + 0xb5, 0xfb, 0x6b, 0xd7, 0x2a, 0xa7, 0x50, 0x09, 0xf2, 0x9b, 0x6b, 0x77, 0xae, 0xdd, 0xbc, 0x73, + 0xbd, 0xa2, 0x91, 0x87, 0xd6, 0xfd, 0x3b, 0x77, 0xc8, 0x43, 0xea, 0x85, 0xdb, 0xe2, 0x65, 0x30, + 0x5e, 0xb9, 0x4d, 0x41, 0x61, 0xd5, 0x71, 0x68, 0x0a, 0x61, 0xbc, 0x6b, 0x7b, 0x26, 0xd9, 0xc9, + 0x15, 0x0d, 0xe5, 0x21, 0x7d, 0xf7, 0xee, 0x46, 0x25, 0x85, 0xe6, 0xa0, 0x72, 0x0d, 0xeb, 0x46, + 0xdf, 0xb4, 0x70, 0x98, 0xb7, 0x2a, 0xe9, 0xe6, 0xc3, 0xdf, 0x7c, 0xb3, 0xa4, 0x7d, 0xf5, 0xcd, + 0x92, 0xf6, 0xe7, 0x6f, 0x96, 0xb4, 0xcf, 0xbf, 0x5d, 0x3a, 0xf5, 0xd5, 0xb7, 0x4b, 0xa7, 0xfe, + 0xf0, 0xed, 0xd2, 0xa9, 0xff, 0xbc, 0xdc, 0x33, 0xfd, 0x9d, 0xa0, 0xd3, 0xe8, 0xda, 0x03, 0xfe, + 0x93, 0x54, 0xc7, 0xb5, 0x49, 0x82, 0xe0, 0x4f, 0x2b, 0xc9, 0xdf, 
0xaa, 0xfe, 0x2c, 0x75, 0x6e, + 0x95, 0x3e, 0x6e, 0x32, 0xba, 0xc6, 0x4d, 0xbb, 0xc1, 0x00, 0xf4, 0xd7, 0x89, 0x5e, 0x27, 0x47, + 0x7f, 0x85, 0xf8, 0xca, 0xdf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf9, 0x44, 0xa5, 0x08, 0xe6, 0x3a, + 0x00, 0x00, } func (m *EventSequence) Marshal() (dAtA []byte, err error) { @@ -4608,6 +4667,18 @@ func (m *SubmitJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ExperimentalPriceInfo != nil { + { + size, err := m.ExperimentalPriceInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvents(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } if len(m.JobId) > 0 { i -= len(m.JobId) copy(dAtA[i:], m.JobId) @@ -4720,6 +4791,35 @@ func (m *SubmitJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ExperimentalPriceInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExperimentalPriceInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExperimentalPriceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BidPrice != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.BidPrice)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + func (m *KubernetesMainObject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5249,20 +5349,20 @@ func (m *JobSetFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.States) > 0 { - dAtA40 := make([]byte, len(m.States)*10) - var j39 int + dAtA41 := make([]byte, len(m.States)*10) + var j40 int for _, num := range m.States { for num >= 1<<7 { - dAtA40[j39] = uint8(uint64(num)&0x7f | 0x80) + dAtA41[j40] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j39++ + j40++ } - dAtA40[j39] = uint8(num) - j39++ + dAtA41[j40] = uint8(num) + j40++ } - i -= j39 - copy(dAtA[i:], dAtA40[:j39]) - i = encodeVarintEvents(dAtA, i, uint64(j39)) + i -= j40 + copy(dAtA[i:], dAtA41[:j40]) + i = encodeVarintEvents(dAtA, i, uint64(j40)) i-- dAtA[i] = 0xa } @@ -5297,20 +5397,20 @@ func (m *CancelJobSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x12 } if len(m.States) > 0 { - dAtA42 := make([]byte, len(m.States)*10) - var j41 int + dAtA43 := make([]byte, len(m.States)*10) + var j42 int for _, num := range m.States { for num >= 1<<7 { - dAtA42[j41] = uint8(uint64(num)&0x7f | 0x80) + dAtA43[j42] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j41++ + j42++ } - dAtA42[j41] = uint8(num) - j41++ + dAtA43[j42] = uint8(num) + j42++ } - i -= j41 - copy(dAtA[i:], dAtA42[:j41]) - i = encodeVarintEvents(dAtA, i, uint64(j41)) + i -= j42 + copy(dAtA[i:], dAtA43[:j42]) + i = encodeVarintEvents(dAtA, i, uint64(j42)) i-- dAtA[i] = 0xa } @@ -7438,6 +7538,22 @@ func (m *SubmitJob) Size() (n int) { if l > 0 { n += 1 + l + sovEvents(uint64(l)) } + if m.ExperimentalPriceInfo != nil { + l = m.ExperimentalPriceInfo.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *ExperimentalPriceInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BidPrice != 0 { + n += 9 + } return n } @@ -10362,6 +10478,103 @@ func (m *SubmitJob) Unmarshal(dAtA []byte) error { } m.JobId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 
15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExperimentalPriceInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExperimentalPriceInfo == nil { + m.ExperimentalPriceInfo = &ExperimentalPriceInfo{} + } + if err := m.ExperimentalPriceInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExperimentalPriceInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExperimentalPriceInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExperimentalPriceInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field BidPrice", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BidPrice = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skipEvents(dAtA[iNdEx:]) diff --git a/pkg/armadaevents/events.proto b/pkg/armadaevents/events.proto index 4b14cf4b402..8905344026b 100644 --- a/pkg/armadaevents/events.proto +++ b/pkg/armadaevents/events.proto @@ -156,6 +156,12 @@ message SubmitJob { bool isDuplicate = 12; // The job id string job_id = 14; + // Pricing Information for the job. Currently experimental + ExperimentalPriceInfo experimentalPriceInfo = 15; +} + +message ExperimentalPriceInfo { + double bidPrice = 1; } // Kubernetes objects that can serve as main objects for an Armada job. 
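The patch above threads an optional pricing submessage through SubmitJob: field 15 on the wire (tag byte 0x7a, length-delimited), with bidPrice encoded as a little-endian double (field 1, wire type 1), exactly as the generated marshal/unmarshal code shows. A minimal round-trip sketch of the new field, assuming the generated package is importable as github.com/armadaproject/armada/pkg/armadaevents and using a placeholder job id:

    package main

    import (
    	"fmt"

    	"github.com/armadaproject/armada/pkg/armadaevents"
    )

    func main() {
    	// Attach the experimental price info to a submit event.
    	submit := &armadaevents.SubmitJob{
    		JobId: "example-job-id", // placeholder, not a real job id
    		ExperimentalPriceInfo: &armadaevents.ExperimentalPriceInfo{
    			BidPrice: 1.5,
    		},
    	}

    	// Round-trip through the generated marshal/unmarshal code above.
    	buf, err := submit.Marshal()
    	if err != nil {
    		panic(err)
    	}

    	var decoded armadaevents.SubmitJob
    	if err := decoded.Unmarshal(buf); err != nil {
    		panic(err)
    	}
    	fmt.Println(decoded.ExperimentalPriceInfo.BidPrice) // 1.5
    }

Because the field is optional, consumers should nil-check ExperimentalPriceInfo before dereferencing it; events produced by older clients simply will not carry it.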
From 457db0598226cfc14d93e9f7e400f900809e3a6a Mon Sep 17 00:00:00 2001
From: Chris Martin
Date: Mon, 2 Dec 2024 18:12:05 +0000
Subject: [PATCH 11/12] fix spot price metrics (#4071)

---
 internal/scheduler/metrics/cycle_metrics.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/internal/scheduler/metrics/cycle_metrics.go b/internal/scheduler/metrics/cycle_metrics.go
index 5c03b07d075..db515925f74 100644
--- a/internal/scheduler/metrics/cycle_metrics.go
+++ b/internal/scheduler/metrics/cycle_metrics.go
@@ -340,6 +340,7 @@ func (m *cycleMetrics) describe(ch chan<- *prometheus.Desc) {
 		m.loopNumber.Describe(ch)
 		m.evictedJobs.Describe(ch)
 		m.evictedResources.Describe(ch)
+		m.spotPrice.Describe(ch)
 	}
 
 	m.reconciliationCycleTime.Describe(ch)
@@ -365,6 +366,7 @@ func (m *cycleMetrics) collect(ch chan<- prometheus.Metric) {
 		m.loopNumber.Collect(ch)
 		m.evictedJobs.Collect(ch)
 		m.evictedResources.Collect(ch)
+		m.spotPrice.Collect(ch)
 	}
 
 	m.reconciliationCycleTime.Collect(ch)
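The two added lines are the whole fix: cycleMetrics is a hand-rolled Prometheus collector, so a vector that is updated every cycle but omitted from collect never reaches the /metrics endpoint, and one omitted from describe can trip the registry's consistency checks. Reduced to a standalone sketch of the pattern (the type and metric names here are illustrative, not Armada's):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // poolMetrics follows the same pattern as cycleMetrics: every vector it
    // owns must be forwarded by both Describe and Collect.
    type poolMetrics struct {
    	spotPrice *prometheus.GaugeVec
    }

    func newPoolMetrics() *poolMetrics {
    	return &poolMetrics{
    		spotPrice: prometheus.NewGaugeVec(
    			prometheus.GaugeOpts{Name: "pool_spot_price", Help: "Spot price per pool."},
    			[]string{"pool"},
    		),
    	}
    }

    func (m *poolMetrics) Describe(ch chan<- *prometheus.Desc) {
    	m.spotPrice.Describe(ch) // forgetting this (or Collect) silently drops the series
    }

    func (m *poolMetrics) Collect(ch chan<- prometheus.Metric) {
    	m.spotPrice.Collect(ch)
    }

    func main() {
    	m := newPoolMetrics()
    	m.spotPrice.WithLabelValues("cpu-pool").Set(1.25)
    	prometheus.MustRegister(m) // m satisfies prometheus.Collector
    }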
From 7a76f2453e67f6d1bb3c3f408fa6a58c6f92112f Mon Sep 17 00:00:00 2001
From: Chris Martin
Date: Tue, 3 Dec 2024 09:23:28 +0000
Subject: [PATCH 12/12] Per Queue Pricing Metrics (#4072)

* per-queue pricing metrics

* lint
---
 internal/common/metrics/domain.go            |  9 +++
 internal/common/metrics/scheduler_metrics.go | 74 ++++++++++++++++++++
 internal/scheduler/metrics.go                |  1 +
 internal/scheduler/metrics_test.go           | 13 +++-
 4 files changed, 95 insertions(+), 2 deletions(-)

diff --git a/internal/common/metrics/domain.go b/internal/common/metrics/domain.go
index 86b7bd80c83..138da763a95 100644
--- a/internal/common/metrics/domain.go
+++ b/internal/common/metrics/domain.go
@@ -19,6 +19,7 @@ type QueueMetrics struct {
 	PriorityClass string
 	Resources     ResourceMetrics
 	Durations     *FloatMetrics
+	BidPrices     *FloatMetrics
 }
 
 type QueueMetricsRecorder struct {
@@ -26,6 +27,7 @@ type QueueMetricsRecorder struct {
 	PriorityClass    string
 	resourceRecorder *ResourceMetricsRecorder
 	durationRecorder *FloatMetricsRecorder
+	bidPriceRecorder *FloatMetricsRecorder
 }
 
 type JobMetricsRecorder struct {
@@ -46,6 +48,11 @@ func (r *JobMetricsRecorder) RecordResources(pool string, priorityClass string,
 	recorder.resourceRecorder.Record(resources)
 }
 
+func (r *JobMetricsRecorder) RecordBidPrice(pool string, priorityClass string, price float64) {
+	recorder := r.getOrCreateRecorder(pool, priorityClass)
+	recorder.bidPriceRecorder.Record(price)
+}
+
 func (r *JobMetricsRecorder) Metrics() []*QueueMetrics {
 	result := make([]*QueueMetrics, 0, len(r.recordersByPoolAndPriorityClass))
 	for _, v := range r.recordersByPoolAndPriorityClass {
@@ -54,6 +61,7 @@ func (r *JobMetricsRecorder) Metrics() []*QueueMetrics {
 			PriorityClass: v.PriorityClass,
 			Resources:     v.resourceRecorder.GetMetrics(),
 			Durations:     v.durationRecorder.GetMetrics(),
+			BidPrices:     v.bidPriceRecorder.GetMetrics(),
 		})
 	}
 	return result
@@ -68,6 +76,7 @@ func (r *JobMetricsRecorder) getOrCreateRecorder(pool string, pritorityClass str
 			PriorityClass:    pritorityClass,
 			resourceRecorder: NewResourceMetricsRecorder(),
 			durationRecorder: NewDefaultJobDurationMetricsRecorder(),
+			bidPriceRecorder: NewFloatMetricsRecorder(),
 		}
 		r.recordersByPoolAndPriorityClass[recorderKey] = qmr
 	}
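The recorder change is self-contained: bid prices reuse the FloatMetricsRecorder machinery already keyed by (pool, priorityClass), so min, max and median fall out of the existing FloatMetrics type. A sketch of the behaviour as an in-package test (NewJobMetricsRecorder is assumed to be the package's existing constructor; the package is internal, so this only compiles inside the Armada module):

    package metrics

    import (
    	"fmt"
    	"testing"
    )

    func TestBidPriceRecorderSketch(t *testing.T) {
    	recorder := NewJobMetricsRecorder() // assumed existing constructor

    	// Three jobs land in the same (pool, priorityClass) bucket.
    	recorder.RecordBidPrice("cpu", "armada-default", 0.5)
    	recorder.RecordBidPrice("cpu", "armada-default", 1.5)
    	recorder.RecordBidPrice("cpu", "armada-default", 2.5)

    	for _, m := range recorder.Metrics() {
    		// BidPrices is a *FloatMetrics, so the aggregates come with it.
    		fmt.Printf("%s/%s min=%.1f max=%.1f median=%.1f\n",
    			m.Pool, m.PriorityClass,
    			m.BidPrices.GetMin(), m.BidPrices.GetMax(), m.BidPrices.GetMedian())
    		// expected: cpu/armada-default min=0.5 max=2.5 median=1.5
    	}
    }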
diff --git a/internal/common/metrics/scheduler_metrics.go b/internal/common/metrics/scheduler_metrics.go
index 80403bf412e..4b99dbc7e36 100644
--- a/internal/common/metrics/scheduler_metrics.go
+++ b/internal/common/metrics/scheduler_metrics.go
@@ -24,6 +24,48 @@ var QueueDistinctSchedulingKeysDesc = prometheus.NewDesc(
 	nil,
 )
 
+var MinQueuePriceQueuedDesc = prometheus.NewDesc(
+	MetricPrefix+"queue_price_queued_min",
+	"Minimum price of queued jobs",
+	[]string{"pool", "priorityClass", "queue"},
+	nil,
+)
+
+var MaxQueuePriceQueuedDesc = prometheus.NewDesc(
+	MetricPrefix+"queue_price_queued_max",
+	"Maximum price of queued jobs",
+	[]string{"pool", "priorityClass", "queue"},
+	nil,
+)
+
+var MedianQueuePriceQueuedDesc = prometheus.NewDesc(
+	MetricPrefix+"queue_price_queued_median",
+	"Median price of queued jobs",
+	[]string{"pool", "priorityClass", "queue"},
+	nil,
+)
+
+var MinQueuePriceRunningDesc = prometheus.NewDesc(
+	MetricPrefix+"queue_price_running_min",
+	"Minimum price of running jobs",
+	[]string{"pool", "priorityClass", "queue"},
+	nil,
+)
+
+var MaxQueuePriceRunningDesc = prometheus.NewDesc(
+	MetricPrefix+"queue_price_running_max",
+	"Maximum price of running jobs",
+	[]string{"pool", "priorityClass", "queue"},
+	nil,
+)
+
+var MedianQueuePriceRunningDesc = prometheus.NewDesc(
+	MetricPrefix+"queue_price_running_median",
+	"Median price of running jobs",
+	[]string{"pool", "priorityClass", "queue"},
+	nil,
+)
+
 var QueueResourcesDesc = prometheus.NewDesc(
 	MetricPrefix+"queue_resource_queued",
 	"Resource required by queued jobs",
@@ -252,6 +294,10 @@ func CollectQueueMetrics(queueCounts map[string]int, queueDistinctSchedulingKeyC
 			metrics = append(metrics, NewMedianQueueDuration(queueDurations.GetMedian(), m.Pool, m.PriorityClass, q))
 		}
 
+		metrics = append(metrics, NewMinQueuePriceQueuedMetric(m.BidPrices.GetMin(), m.Pool, m.PriorityClass, q))
+		metrics = append(metrics, NewMaxQueuePriceQueuedMetric(m.BidPrices.GetMax(), m.Pool, m.PriorityClass, q))
+		metrics = append(metrics, NewMedianQueuePriceQueuedMetric(m.BidPrices.GetMedian(), m.Pool, m.PriorityClass, q))
+
 		// Sort the keys so we get a predictable output order
 		resources := maps.Keys(m.Resources)
 		slices.Sort(resources)
@@ -277,6 +323,10 @@ func CollectQueueMetrics(queueCounts map[string]int, queueDistinctSchedulingKeyC
 			metrics = append(metrics, NewMedianJobRunDuration(runningJobDurations.GetMedian(), m.Pool, m.PriorityClass, q))
 		}
 
+		metrics = append(metrics, NewMinQueuePriceRunningMetric(m.BidPrices.GetMin(), m.Pool, m.PriorityClass, q))
+		metrics = append(metrics, NewMaxQueuePriceRunningMetric(m.BidPrices.GetMax(), m.Pool, m.PriorityClass, q))
+		metrics = append(metrics, NewMedianQueuePriceRunningMetric(m.BidPrices.GetMedian(), m.Pool, m.PriorityClass, q))
+
 		// Sort the keys so we get a predictable output order
 		resources := maps.Keys(m.Resources)
 		slices.Sort(resources)
@@ -398,6 +448,30 @@ func NewQueuePriorityMetric(value float64, queue string) prometheus.Metric {
 	return prometheus.MustNewConstMetric(QueuePriorityDesc, prometheus.GaugeValue, value, queue, queue)
 }
 
+func NewMinQueuePriceQueuedMetric(value float64, pool string, priorityClass string, queue string) prometheus.Metric {
+	return prometheus.MustNewConstMetric(MinQueuePriceQueuedDesc, prometheus.GaugeValue, value, pool, priorityClass, queue)
+}
+
+func NewMaxQueuePriceQueuedMetric(value float64, pool string, priorityClass string, queue string) prometheus.Metric {
+	return prometheus.MustNewConstMetric(MaxQueuePriceQueuedDesc, prometheus.GaugeValue, value, pool, priorityClass, queue)
+}
+
+func NewMedianQueuePriceQueuedMetric(value float64, pool string, priorityClass string, queue string) prometheus.Metric {
+	return prometheus.MustNewConstMetric(MedianQueuePriceQueuedDesc, prometheus.GaugeValue, value, pool, priorityClass, queue)
+}
+
+func NewMinQueuePriceRunningMetric(value float64, pool string, priorityClass string, queue string) prometheus.Metric {
+	return prometheus.MustNewConstMetric(MinQueuePriceRunningDesc, prometheus.GaugeValue, value, pool, priorityClass, queue)
+}
+
+func NewMaxQueuePriceRunningMetric(value float64, pool string, priorityClass string, queue string) prometheus.Metric {
+	return prometheus.MustNewConstMetric(MaxQueuePriceRunningDesc, prometheus.GaugeValue, value, pool, priorityClass, queue)
+}
+
+func NewMedianQueuePriceRunningMetric(value float64, pool string, priorityClass string, queue string) prometheus.Metric {
+	return prometheus.MustNewConstMetric(MedianQueuePriceRunningDesc, prometheus.GaugeValue, value, pool, priorityClass, queue)
+}
+
 func NewQueueLabelsMetric(queue string, labels map[string]string) prometheus.Metric {
 	metricLabels := make([]string, 0, len(labels)+len(queueLabelDefaultLabels))
 	values := make([]string, 0, len(labels)+len(queueLabelDefaultLabels))
diff --git a/internal/scheduler/metrics.go b/internal/scheduler/metrics.go
index af9d932fe64..8cc2f3f49d1 100644
--- a/internal/scheduler/metrics.go
+++ b/internal/scheduler/metrics.go
@@ -222,6 +222,7 @@ func (c *MetricsCollector) updateQueueMetrics(ctx *armadacontext.Context) ([]pro
 		for _, pool := range pools {
 			recorder.RecordJobRuntime(pool, priorityClass, timeInState)
 			recorder.RecordResources(pool, priorityClass, jobResources)
+			recorder.RecordBidPrice(pool, priorityClass, job.BidPrice())
 		}
 	}
 
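Each new constructor emits a constant gauge labelled (pool, priorityClass, queue), matching the Desc definitions, and the callers in CollectQueueMetrics and in the test below pass the arguments in that order. Assuming MetricPrefix resolves to armada_, as for the package's other series, the median queued price for a queue would surface on /metrics as, for example:

    # HELP armada_queue_price_queued_median Median price of queued jobs
    # TYPE armada_queue_price_queued_median gauge
    armada_queue_price_queued_median{pool="cpu",priorityClass="armada-default",queue="test"} 1.5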
diff --git a/internal/scheduler/metrics_test.go b/internal/scheduler/metrics_test.go
index 772101890cc..c2f7d77c117 100644
--- a/internal/scheduler/metrics_test.go
+++ b/internal/scheduler/metrics_test.go
@@ -29,8 +29,8 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) {
 	runningJobs := make([]*jobdb.Job, 3)
 	for i := 0; i < len(queuedJobs); i++ {
 		startTime := testfixtures.BaseTime.Add(-time.Duration(100*i) * time.Second).UnixNano()
-		queuedJobs[i] = testfixtures.TestQueuedJobDbJob().WithCreated(startTime)
-		runningJobs[i] = testfixtures.TestRunningJobDbJob(startTime)
+		queuedJobs[i] = testfixtures.TestQueuedJobDbJob().WithCreated(startTime).WithBidPrice(float64(i))
+		runningJobs[i] = testfixtures.TestRunningJobDbJob(startTime).WithBidPrice(float64(i) + 100)
 	}
 
 	// Run that has been returned
@@ -64,6 +64,9 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) {
 				commonmetrics.NewMinQueueDuration(0, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewMaxQueueDuration(200, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewMedianQueueDuration(100, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMinQueuePriceQueuedMetric(0, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMaxQueuePriceQueuedMetric(2, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMedianQueuePriceQueuedMetric(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewQueueResources(3, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
 				commonmetrics.NewMinQueueResources(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
 				commonmetrics.NewMaxQueueResources(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
@@ -92,6 +95,9 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) {
 				commonmetrics.NewMinQueueDuration(200, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewMaxQueueDuration(200, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewMedianQueueDuration(200, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMinQueuePriceQueuedMetric(0, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMaxQueuePriceQueuedMetric(0, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMedianQueuePriceQueuedMetric(0, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewQueueResources(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
 				commonmetrics.NewMinQueueResources(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
 				commonmetrics.NewMaxQueueResources(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
@@ -116,6 +122,9 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) {
 				commonmetrics.NewMinJobRunDuration(0, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewMaxJobRunDuration(200, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewMedianJobRunDuration(100, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMinQueuePriceRunningMetric(100, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMaxQueuePriceRunningMetric(102, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
+				commonmetrics.NewMedianQueuePriceRunningMetric(101, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue),
 				commonmetrics.NewMinQueueAllocated(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
 				commonmetrics.NewMaxQueueAllocated(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
 				commonmetrics.NewMedianQueueAllocated(1, testfixtures.TestPool, testfixtures.TestDefaultPriorityClass, testfixtures.TestQueue, "cpu"),
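The expected values follow directly from the fixtures: the three queued jobs carry bid prices 0, 1 and 2 (min 0, max 2, median 1) and the three running jobs 100, 101 and 102 (min 100, max 102, median 101). The middle block, whose queue durations are all 200, appears to cover a single job whose bid price was left at its zero value, so its queued-price gauges all read 0.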