From 5f6fef348d8af2083e8f6dd6fc96d83e83e1fde5 Mon Sep 17 00:00:00 2001
From: Alexandra Doak
Date: Wed, 4 Sep 2024 11:19:46 -0700
Subject: [PATCH 01/11] Updates the heap to capacity converter and max allowed cost

---
 .../plugins/task_manager/server/lib/get_default_capacity.ts   | 4 ++--
 .../task_manager/server/task_pool/cost_capacity.test.ts       | 2 +-
 x-pack/plugins/task_manager/server/task_pool/cost_capacity.ts | 3 ++-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/x-pack/plugins/task_manager/server/lib/get_default_capacity.ts b/x-pack/plugins/task_manager/server/lib/get_default_capacity.ts
index aeafa0f63c4d7..dff31ae3afd50 100644
--- a/x-pack/plugins/task_manager/server/lib/get_default_capacity.ts
+++ b/x-pack/plugins/task_manager/server/lib/get_default_capacity.ts
@@ -19,8 +19,8 @@ interface GetDefaultCapacityOpts {
 const HEAP_TO_CAPACITY_MAP = [
   { minHeap: 0, maxHeap: 1, capacity: 10 },
   { minHeap: 1, maxHeap: 2, capacity: 15 },
-  { minHeap: 2, maxHeap: 4, capacity: 25, backgroundTaskNodeOnly: false },
-  { minHeap: 2, maxHeap: 4, capacity: 50, backgroundTaskNodeOnly: true },
+  { minHeap: 2, maxHeap: 16, capacity: 25, backgroundTaskNodeOnly: false },
+  { minHeap: 2, maxHeap: 16, capacity: 50, backgroundTaskNodeOnly: true },
 ];
 
 export function getDefaultCapacity({
diff --git a/x-pack/plugins/task_manager/server/task_pool/cost_capacity.test.ts b/x-pack/plugins/task_manager/server/task_pool/cost_capacity.test.ts
index b40c6eb2af37d..31fd90b8c6ec6 100644
--- a/x-pack/plugins/task_manager/server/task_pool/cost_capacity.test.ts
+++ b/x-pack/plugins/task_manager/server/task_pool/cost_capacity.test.ts
@@ -22,7 +22,7 @@ describe('CostCapacity', () => {
     const capacity$ = new Subject();
     const pool = new CostCapacity({ capacity$, logger });
 
-    expect(pool.capacity).toBe(0);
+    expect(pool.capacity).toBe(10);
 
     capacity$.next(20);
     expect(pool.capacity).toBe(40);
diff --git a/x-pack/plugins/task_manager/server/task_pool/cost_capacity.ts b/x-pack/plugins/task_manager/server/task_pool/cost_capacity.ts
index ead7cf1839714..29e214b63bc41 100644
--- a/x-pack/plugins/task_manager/server/task_pool/cost_capacity.ts
+++ b/x-pack/plugins/task_manager/server/task_pool/cost_capacity.ts
@@ -6,13 +6,14 @@
  */
 
 import { Logger } from '@kbn/core/server';
+import { DEFAULT_CAPACITY } from '../config';
 import { TaskDefinition } from '../task';
 import { TaskRunner } from '../task_running';
 import { CapacityOpts, ICapacity } from './types';
 import { getCapacityInCost } from './utils';
 
 export class CostCapacity implements ICapacity {
-  private maxAllowedCost: number = 0;
+  private maxAllowedCost: number = DEFAULT_CAPACITY;
   private logger: Logger;
 
   constructor(opts: CapacityOpts) {
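For context on patch 01: the HEAP_TO_CAPACITY_MAP above ties an instance's heap size to its default task capacity, and the change widens the top tier from 4 GB to 16 GB of heap so that larger nodes still land on a sensible default. A minimal sketch of how such a map could be resolved follows; the `resolveCapacity` helper and its boundary semantics are assumptions for illustration, not the actual `getDefaultCapacity` implementation.

[source,typescript]
--------------------------------------------------
interface CapacityTier {
  minHeap: number; // lower bound in GB, assumed exclusive
  maxHeap: number; // upper bound in GB, assumed inclusive
  capacity: number;
  backgroundTaskNodeOnly?: boolean;
}

// Same data as the patched HEAP_TO_CAPACITY_MAP above.
const HEAP_TO_CAPACITY_MAP: CapacityTier[] = [
  { minHeap: 0, maxHeap: 1, capacity: 10 },
  { minHeap: 1, maxHeap: 2, capacity: 15 },
  { minHeap: 2, maxHeap: 16, capacity: 25, backgroundTaskNodeOnly: false },
  { minHeap: 2, maxHeap: 16, capacity: 50, backgroundTaskNodeOnly: true },
];

// Pick the first tier whose heap range contains the instance's heap size and
// whose background-task-node flag matches; fall back to the smallest default.
function resolveCapacity(heapGb: number, isBackgroundTaskNodeOnly: boolean): number {
  const tier = HEAP_TO_CAPACITY_MAP.find(
    (t) =>
      heapGb > t.minHeap &&
      heapGb <= t.maxHeap &&
      (t.backgroundTaskNodeOnly === undefined ||
        t.backgroundTaskNodeOnly === isBackgroundTaskNodeOnly)
  );
  return tier?.capacity ?? 10;
}

// Before this patch an 8 GB heap fell outside every tier; now it resolves:
console.log(resolveCapacity(8, true));    // 50 (dedicated background-task node)
console.log(resolveCapacity(8, false));   // 25 (regular node)
console.log(resolveCapacity(1.5, false)); // 15
--------------------------------------------------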
From b45c645aa3f848467a31a813a54bb220be0b57c6 Mon Sep 17 00:00:00 2001
From: Alexandra Doak
Date: Wed, 4 Sep 2024 13:32:41 -0700
Subject: [PATCH 02/11] Fixing test

---
 .../plugins/task_manager/server/task_pool/task_pool.test.ts | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts b/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts
index 86e3dc024257d..4381818bdf821 100644
--- a/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts
+++ b/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts
@@ -130,7 +130,7 @@ describe('TaskPool', () => {
     expect(pool.availableCapacity()).toEqual(7);
   });
 
-  test('availableCapacity is 0 until capacity$ pushes a value', async () => {
+  test('availableCapacity is 10 until capacity$ pushes a value', async () => {
     const capacity$ = new Subject();
     const pool = new TaskPool({
       capacity$,
@@ -139,9 +139,9 @@ describe('TaskPool', () => {
       strategy: CLAIM_STRATEGY_UPDATE_BY_QUERY,
     });
 
-    expect(pool.availableCapacity()).toEqual(0);
-    capacity$.next(10);
     expect(pool.availableCapacity()).toEqual(10);
+    capacity$.next(20);
+    expect(pool.availableCapacity()).toEqual(20);
   });
 
   test('does not run tasks that are beyond its available capacity', async () => {

From 4f1e76379bf06f76e5e1c55a492e6be4a81e0118 Mon Sep 17 00:00:00 2001
From: Alexandra Doak
Date: Wed, 4 Sep 2024 14:43:15 -0700
Subject: [PATCH 03/11] Fixing tests again

---
 .../task_manager/server/task_pool/task_pool.test.ts | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts b/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts
index 4381818bdf821..562e2f7f69942 100644
--- a/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts
+++ b/x-pack/plugins/task_manager/server/task_pool/task_pool.test.ts
@@ -130,7 +130,7 @@ describe('TaskPool', () => {
     expect(pool.availableCapacity()).toEqual(7);
   });
 
-  test('availableCapacity is 10 until capacity$ pushes a value', async () => {
+  test('availableCapacity is 0 until capacity$ pushes a value', async () => {
     const capacity$ = new Subject();
     const pool = new TaskPool({
       capacity$,
@@ -139,9 +139,9 @@ describe('TaskPool', () => {
       strategy: CLAIM_STRATEGY_UPDATE_BY_QUERY,
     });
 
+    expect(pool.availableCapacity()).toEqual(0);
+    capacity$.next(10);
     expect(pool.availableCapacity()).toEqual(10);
-    capacity$.next(20);
-    expect(pool.availableCapacity()).toEqual(20);
   });
 
   test('does not run tasks that are beyond its available capacity', async () => {
@@ -517,11 +517,11 @@ describe('TaskPool', () => {
     expect(pool.availableCapacity()).toEqual(14);
   });
 
-  test('availableCapacity is 0 until capacity$ pushes a value', async () => {
+  test('availableCapacity is 10 until capacity$ pushes a value', async () => {
     const capacity$ = new Subject();
     const pool = new TaskPool({ capacity$, definitions, logger, strategy: CLAIM_STRATEGY_MGET });
 
-    expect(pool.availableCapacity()).toEqual(0);
+    expect(pool.availableCapacity()).toEqual(10);
     capacity$.next(20);
     expect(pool.availableCapacity()).toEqual(40);
   });
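The back-and-forth in patches 02 and 03 settles on per-strategy defaults: with CLAIM_STRATEGY_MGET the pool is cost-based and starts from the new default capacity, while CLAIM_STRATEGY_UPDATE_BY_QUERY still reports 0 until capacity$ emits. A condensed sketch of the cost-based behavior is below; the class name is invented, and the factor of 2 is inferred from the test expectation that capacity$.next(20) yields 40, not taken from the real CostCapacity source.

[source,typescript]
--------------------------------------------------
import { Subject } from 'rxjs';

const DEFAULT_CAPACITY = 10;

// Cost-based capacity: seeded with DEFAULT_CAPACITY (patch 01), then updated
// whenever the capacity$ observable emits a new capacity value.
class CostBasedCapacitySketch {
  private maxAllowedCost = DEFAULT_CAPACITY;

  constructor(capacity$: Subject<number>) {
    capacity$.subscribe((capacity) => {
      // Each normal-cost task is assumed to cost 2, so the total allowed
      // cost is capacity * 2 (inferred from the tests in this series).
      this.maxAllowedCost = capacity * 2;
    });
  }

  get capacity(): number {
    return this.maxAllowedCost;
  }
}

const capacity$ = new Subject<number>();
const pool = new CostBasedCapacitySketch(capacity$);
console.log(pool.capacity); // 10: the seeded default, before any emission
capacity$.next(20);
console.log(pool.capacity); // 40: matches the cost_capacity.test.ts expectation
--------------------------------------------------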
From a4e282970f3001bb8b30540c40e5c648e1d2117b Mon Sep 17 00:00:00 2001
From: Alexandra Doak
Date: Thu, 5 Sep 2024 10:01:09 -0700
Subject: [PATCH 04/11] Adding docker configs

---
 .../docker_generator/resources/base/bin/kibana-docker | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker b/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker
index e2ffab235f34e..90122da02b839 100755
--- a/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker
+++ b/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker
@@ -238,6 +238,7 @@ kibana_vars=(
   xpack.alerting.rules.run.actions.max
   xpack.alerting.rules.run.alerts.max
   xpack.alerting.rules.run.actions.connectorTypeOverrides
+  xpack.alerting.maxScheduledPerMinute
   xpack.alerts.healthCheck.interval
   xpack.alerts.invalidateApiKeysTask.interval
   xpack.alerts.invalidateApiKeysTask.removalDelay
@@ -431,6 +432,8 @@ kibana_vars=(
   xpack.task_manager.event_loop_delay.monitor
   xpack.task_manager.event_loop_delay.warn_threshold
   xpack.task_manager.worker_utilization_running_average_window
+  xpack.discovery.active_nodes_lookback
+  xpack.discovery.interval
   xpack.uptime.index
   serverless
 )

From 0b36bff2c44e322f70b633e566c16abe4e62 Mon Sep 17 00:00:00 2001
From: Alexandra Doak
Date: Thu, 5 Sep 2024 11:58:21 -0700
Subject: [PATCH 05/11] Updating the docs

---
 docs/settings/task-manager-settings.asciidoc        | 4 ++++
 docs/user/alerting/alerting-troubleshooting.asciidoc | 2 +-
 .../task-manager-production-considerations.asciidoc | 2 +-
 .../task-manager-troubleshooting.asciidoc           | 2 +-
 4 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/docs/settings/task-manager-settings.asciidoc b/docs/settings/task-manager-settings.asciidoc
index 0ea057fb9dee7..3dfb2a4d11e47 100644
--- a/docs/settings/task-manager-settings.asciidoc
+++ b/docs/settings/task-manager-settings.asciidoc
@@ -23,6 +23,7 @@ How often, in milliseconds, the task manager will look for more work. Defaults
 How many requests can Task Manager buffer before it rejects new requests. Defaults to 1000.
 
 `xpack.task_manager.max_workers`::
+deprecated:[8.16.0]
 The maximum number of tasks that this Kibana instance will run simultaneously. Defaults to 10.
 Starting in 8.0, it will not be possible to set the value greater than 100.
@@ -48,6 +49,9 @@ Enables event loop delay monitoring, which will log a warning when a task causes
 `xpack.task_manager.event_loop_delay.warn_threshold`::
 Sets the amount of event loop delay during a task execution which will cause a warning to be logged. Defaults to 5000 milliseconds (5 seconds).
 
+`xpack.task_manager.capacity`::
+Sets the number of normal cost tasks that can be run at one time. Can be minimum 5 and maximum 50. Defaults to 10.
+
 [float]
 [[task-manager-health-settings]]
 ==== Task Manager Health settings
diff --git a/docs/user/alerting/alerting-troubleshooting.asciidoc b/docs/user/alerting/alerting-troubleshooting.asciidoc
index a0bcd6dcea290..b1626ba5ca515 100644
--- a/docs/user/alerting/alerting-troubleshooting.asciidoc
+++ b/docs/user/alerting/alerting-troubleshooting.asciidoc
@@ -197,7 +197,7 @@ If cluster performance becomes degraded from excessive or expensive rules and {k
 [source,txt]
 --------------------------------------------------
-xpack.task_manager.max_workers: 1
+xpack.task_manager.capacity: 5
 xpack.task_manager.poll_interval: 1h
 --------------------------------------------------
diff --git a/docs/user/production-considerations/task-manager-production-considerations.asciidoc b/docs/user/production-considerations/task-manager-production-considerations.asciidoc
index 8dd27d4e2d5ee..8835ad5c68d28 100644
--- a/docs/user/production-considerations/task-manager-production-considerations.asciidoc
+++ b/docs/user/production-considerations/task-manager-production-considerations.asciidoc
@@ -85,7 +85,7 @@ By default, each additional {kib} instance will add an additional 10 tasks that
 Other times, it might be preferable to increase the throughput of individual {kib} instances.
 
-Tweak the *Max Workers* via the <> setting, which allows each {kib} to pull a higher number of tasks per interval. This could impact the performance of each {kib} instance as the workload will be higher.
+Tweak the *Capacity* via the <> setting, which allows each {kib} to pull a higher number of tasks per interval. This could impact the performance of each {kib} instance as the workload will be higher.
 
 Tweak the *Poll Interval* via the <> setting, which allows each {kib} to pull scheduled tasks at a higher rate. This could impact the performance of the {es} cluster as the workload will be higher.
diff --git a/docs/user/production-considerations/task-manager-troubleshooting.asciidoc b/docs/user/production-considerations/task-manager-troubleshooting.asciidoc
index a5cbbefbe3160..9ff45caa1e653 100644
--- a/docs/user/production-considerations/task-manager-troubleshooting.asciidoc
+++ b/docs/user/production-considerations/task-manager-troubleshooting.asciidoc
@@ -1002,7 +1002,7 @@ server log [12:41:33.672] [warn][plugins][taskManager][taskManager] taskManager
 This log message tells us that Task Manager is not managing to keep up with the sheer amount of work it has been tasked with completing. This might mean that rules are not running at the frequency that was expected (instead of running every 5 minutes, it runs every 7-8 minutes, just as an example).
 
-By default Task Manager is limited to 10 tasks and this can be bumped up by setting a higher number in the kibana.yml file using the `xpack.task_manager.max_workers` configuration. It is important to keep in mind that a higher number of tasks running at any given time means more load on both Kibana and Elasticsearch, so only change this setting if increasing load in your environment makes sense.
+By default Task Manager is limited to 10 tasks and this can be bumped up by setting a higher number in the kibana.yml file using the `xpack.task_manager.capacity` configuration. It is important to keep in mind that a higher number of tasks running at any given time means more load on both Kibana and Elasticsearch, so only change this setting if increasing load in your environment makes sense.
 
 Another approach to addressing this might be to tell workers to run at a higher rate, rather than adding more of them, which would be configured using xpack.task_manager.poll_interval. This value dictates how often Task Manager checks to see if there’s more work to be done and uses milliseconds (by default it is 3000, which means an interval of 3 seconds).
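Patch 05 as a whole steers the documentation from `xpack.task_manager.max_workers` to the new `xpack.task_manager.capacity` setting. Both are plain `kibana.yml` settings; a hypothetical snippet with illustrative values follows (25 is simply an arbitrary point in the documented 5 to 50 range, not a recommendation from this series):

[source,txt]
--------------------------------------------------
xpack.task_manager.capacity: 25
xpack.task_manager.poll_interval: 3000
--------------------------------------------------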
From fed3ca9993c0dfde6369899d2cdc4060087388da Mon Sep 17 00:00:00 2001
From: Alexandra Doak
Date: Fri, 6 Sep 2024 07:13:27 -0700
Subject: [PATCH 06/11] Removing config

---
 .../docker_generator/resources/base/bin/kibana-docker | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker b/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker
index 90122da02b839..841be2f775336 100755
--- a/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker
+++ b/src/dev/build/tasks/os_packages/docker_generator/resources/base/bin/kibana-docker
@@ -432,8 +432,6 @@ kibana_vars=(
   xpack.task_manager.event_loop_delay.monitor
   xpack.task_manager.event_loop_delay.warn_threshold
   xpack.task_manager.worker_utilization_running_average_window
-  xpack.discovery.active_nodes_lookback
-  xpack.discovery.interval
   xpack.uptime.index
   serverless
 )

From baf80406520239ece9fa0853479e4ed152a195d7 Mon Sep 17 00:00:00 2001
From: Alexandra Doak
Date: Tue, 10 Sep 2024 08:59:04 -0700
Subject: [PATCH 07/11] Updating the config text

---
 docs/settings/task-manager-settings.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/settings/task-manager-settings.asciidoc b/docs/settings/task-manager-settings.asciidoc
index 3dfb2a4d11e47..aa7cc0aa84e1e 100644
--- a/docs/settings/task-manager-settings.asciidoc
+++ b/docs/settings/task-manager-settings.asciidoc
@@ -50,7 +50,7 @@ Enables event loop delay monitoring, which will log a warning when a task causes
 Sets the amount of event loop delay during a task execution which will cause a warning to be logged. Defaults to 5000 milliseconds (5 seconds).
 
 `xpack.task_manager.capacity`::
-Sets the number of normal cost tasks that can be run at one time. Can be minimum 5 and maximum 50. Defaults to 10.
+Controls the number of tasks that can be run at one time. Can be minimum 5 and maximum 50. Defaults to 10.
 
 [float]
 [[task-manager-health-settings]]
From 246f978567178b10d0bfea5453d0587d087ddd7d Mon Sep 17 00:00:00 2001
From: Alexi Doak <109488926+doakalexi@users.noreply.github.com>
Date: Tue, 10 Sep 2024 18:24:18 -0700
Subject: [PATCH 08/11] Update docs/user/production-considerations/task-manager-troubleshooting.asciidoc

Co-authored-by: Lisa Cawley
---
 .../task-manager-troubleshooting.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/user/production-considerations/task-manager-troubleshooting.asciidoc b/docs/user/production-considerations/task-manager-troubleshooting.asciidoc
index 9ff45caa1e653..593c008bd1b89 100644
--- a/docs/user/production-considerations/task-manager-troubleshooting.asciidoc
+++ b/docs/user/production-considerations/task-manager-troubleshooting.asciidoc
@@ -1002,7 +1002,7 @@ server log [12:41:33.672] [warn][plugins][taskManager][taskManager] taskManager
 This log message tells us that Task Manager is not managing to keep up with the sheer amount of work it has been tasked with completing. This might mean that rules are not running at the frequency that was expected (instead of running every 5 minutes, it runs every 7-8 minutes, just as an example).
 
-By default Task Manager is limited to 10 tasks and this can be bumped up by setting a higher number in the kibana.yml file using the `xpack.task_manager.capacity` configuration. It is important to keep in mind that a higher number of tasks running at any given time means more load on both Kibana and Elasticsearch, so only change this setting if increasing load in your environment makes sense.
+By default Task Manager is limited to 10 tasks and this can be bumped up by setting a higher number in the `kibana.yml` file using the `xpack.task_manager.capacity` configuration. It is important to keep in mind that a higher number of tasks running at any given time means more load on both Kibana and Elasticsearch; only change this setting if increasing load in your environment makes sense.
 
 Another approach to addressing this might be to tell workers to run at a higher rate, rather than adding more of them, which would be configured using xpack.task_manager.poll_interval. This value dictates how often Task Manager checks to see if there’s more work to be done and uses milliseconds (by default it is 3000, which means an interval of 3 seconds).

From e5a08fb32df3e1e21fa7c6d3633af835c6eec76b Mon Sep 17 00:00:00 2001
From: Alexi Doak <109488926+doakalexi@users.noreply.github.com>
Date: Tue, 10 Sep 2024 18:24:43 -0700
Subject: [PATCH 09/11] Update docs/settings/task-manager-settings.asciidoc

Co-authored-by: Lisa Cawley
---
 docs/settings/task-manager-settings.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/settings/task-manager-settings.asciidoc b/docs/settings/task-manager-settings.asciidoc
index aa7cc0aa84e1e..1311974e27342 100644
--- a/docs/settings/task-manager-settings.asciidoc
+++ b/docs/settings/task-manager-settings.asciidoc
@@ -50,7 +50,7 @@ Enables event loop delay monitoring, which will log a warning when a task causes
 Sets the amount of event loop delay during a task execution which will cause a warning to be logged. Defaults to 5000 milliseconds (5 seconds).
 
 `xpack.task_manager.capacity`::
-Controls the number of tasks that can be run at one time. Can be minimum 5 and maximum 50. Defaults to 10.
+Controls the number of tasks that can be run at one time. The minimum value is 5 and the maximum is 50. Defaults to 10.
 
 [float]
 [[task-manager-health-settings]]
From f56dd2ba89f70027d6e184f4a1af09baa5fa9e51 Mon Sep 17 00:00:00 2001
From: Alexi Doak <109488926+doakalexi@users.noreply.github.com>
Date: Wed, 11 Sep 2024 11:00:09 -0700
Subject: [PATCH 10/11] Update docs/user/production-considerations/task-manager-production-considerations.asciidoc

Co-authored-by: Lisa Cawley
---
 .../task-manager-production-considerations.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/user/production-considerations/task-manager-production-considerations.asciidoc b/docs/user/production-considerations/task-manager-production-considerations.asciidoc
index 8835ad5c68d28..d54f9ccc54358 100644
--- a/docs/user/production-considerations/task-manager-production-considerations.asciidoc
+++ b/docs/user/production-considerations/task-manager-production-considerations.asciidoc
@@ -85,7 +85,7 @@ By default, each additional {kib} instance will add an additional 10 tasks that
 Other times, it might be preferable to increase the throughput of individual {kib} instances.
 
-Tweak the *Capacity* via the <> setting, which allows each {kib} to pull a higher number of tasks per interval. This could impact the performance of each {kib} instance as the workload will be higher.
+Tweak the capacity with the <> setting, which enables each {kib} instance to pull a higher number of tasks per interval. This setting can impact the performance of each instance as the workload will be higher.
 
 Tweak the *Poll Interval* via the <> setting, which allows each {kib} to pull scheduled tasks at a higher rate. This could impact the performance of the {es} cluster as the workload will be higher.
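Patch 10's reworded paragraph covers the vertical-scaling lever; the interplay between capacity, poll interval, and instance count can be summarized with back-of-the-envelope arithmetic. The helper below is a simplification for illustration only (Kibana exposes no such API), assuming every instance claims a full batch of tasks each poll:

[source,typescript]
--------------------------------------------------
// Rough cluster throughput in tasks per minute, assuming every instance
// claims up to `capacity` tasks each `pollIntervalMs`.
function clusterTasksPerMinute(
  instances: number,
  capacity: number,
  pollIntervalMs: number
): number {
  return instances * capacity * (60_000 / pollIntervalMs);
}

console.log(clusterTasksPerMinute(1, 10, 3000)); // 200: the documented default
console.log(clusterTasksPerMinute(2, 10, 3000)); // 400: scaling horizontally
console.log(clusterTasksPerMinute(1, 20, 3000)); // 400: scaling vertically via capacity
--------------------------------------------------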
From 7f7c3d3d97ef8b6f89e2528c179fe95bc2703a7d Mon Sep 17 00:00:00 2001
From: lcawl
Date: Thu, 12 Sep 2024 09:53:29 -0700
Subject: [PATCH 11/11] [DOCS] Minor formatting edits

---
 .../production-considerations/index.asciidoc  |  2 +-
 ...manager-production-considerations.asciidoc | 25 +++++++++----------
 2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/docs/user/production-considerations/index.asciidoc b/docs/user/production-considerations/index.asciidoc
index 22307f6394248..70ebdf0f06da7 100644
--- a/docs/user/production-considerations/index.asciidoc
+++ b/docs/user/production-considerations/index.asciidoc
@@ -2,6 +2,6 @@ include::production.asciidoc[]
 include::security-production-considerations.asciidoc[]
 include::alerting-production-considerations.asciidoc[]
 include::reporting-production-considerations.asciidoc[]
-include::task-manager-production-considerations.asciidoc[]
+include::task-manager-production-considerations.asciidoc[leveloffset=+1]
 include::task-manager-health-monitoring.asciidoc[]
 include::task-manager-troubleshooting.asciidoc[]
diff --git a/docs/user/production-considerations/task-manager-production-considerations.asciidoc b/docs/user/production-considerations/task-manager-production-considerations.asciidoc
index d54f9ccc54358..43352d47b76aa 100644
--- a/docs/user/production-considerations/task-manager-production-considerations.asciidoc
+++ b/docs/user/production-considerations/task-manager-production-considerations.asciidoc
@@ -1,6 +1,5 @@
-[role="xpack"]
 [[task-manager-production-considerations]]
-== Task Manager
+= Task Manager
 
 {kib} Task Manager is leveraged by features such as Alerting, Actions, and Reporting to run mission critical work as persistent background tasks. These background tasks distribute work across multiple {kib} instances.
@@ -21,7 +20,7 @@ If you lose this index, all scheduled alerts and actions are lost.
 
 [float]
 [[task-manager-background-tasks]]
-=== Running background tasks
+== Running background tasks
 
 {kib} background tasks are managed as follows:
@@ -47,13 +46,13 @@ For detailed troubleshooting guidance, see <>.
 ==============================================
 
 [float]
-=== Deployment considerations
+== Deployment considerations
 
 {es} and {kib} instances use the system clock to determine the current time. To ensure schedules are triggered when expected, synchronize the clocks of all nodes in the cluster using a time service such as http://www.ntp.org/[Network Time Protocol].
 
 [float]
 [[task-manager-scaling-guidance]]
-=== Scaling guidance
+== Scaling guidance
 
 How you deploy {kib} largely depends on your use case. Predicting the throughput a deployment might require to support Task Management is difficult because features can schedule an unpredictable number of tasks at a variety of scheduled cadences.
 However, there is a relatively straightforward method you can follow to produce
 
 [float]
 [[task-manager-default-scaling]]
-==== Default scale
+=== Default scale
 
 By default, {kib} polls for tasks at a rate of 10 tasks every 3 seconds. This means that you can expect a single {kib} instance to support up to 200 _tasks per minute_ (`200/tpm`).
@@ -74,24 +73,24 @@ For details on monitoring the health of {kib} Task Manager, follow the guidance
 
 [float]
 [[task-manager-scaling-horizontally]]
-==== Scaling horizontally
+=== Scaling horizontally
 
 At times, the sustainable approach might be to expand the throughput of your cluster by provisioning additional {kib} instances. By default, each additional {kib} instance will add an additional 10 tasks that your cluster can run concurrently, but you can also scale each {kib} instance vertically, if your diagnosis indicates that they can handle the additional workload.
 
 [float]
 [[task-manager-scaling-vertically]]
-==== Scaling vertically
+=== Scaling vertically
 
 Other times, it might be preferable to increase the throughput of individual {kib} instances.
 
 Tweak the capacity with the <> setting, which enables each {kib} instance to pull a higher number of tasks per interval. This setting can impact the performance of each instance as the workload will be higher.
 
-Tweak the *Poll Interval* via the <> setting, which allows each {kib} to pull scheduled tasks at a higher rate. This could impact the performance of the {es} cluster as the workload will be higher.
+Tweak the poll interval with the <> setting, which enables each {kib} instance to pull scheduled tasks at a higher rate. This setting can impact the performance of the {es} cluster as the workload will be higher.
 
 [float]
 [[task-manager-choosing-scaling-strategy]]
-==== Choosing a scaling strategy
+=== Choosing a scaling strategy
 
 Each scaling strategy comes with its own considerations, and the appropriate strategy largely depends on your use case.
@@ -113,7 +112,7 @@ A higher frequency suggests {kib} instances conflict at a high rate, which you c
 
 [float]
 [[task-manager-rough-throughput-estimation]]
-==== Rough throughput estimation
+=== Rough throughput estimation
 
 Predicting the required throughput a deployment might need to support Task Management is difficult, as features can schedule an unpredictable number of tasks at a variety of scheduled cadences. However, a rough lower bound can be estimated, which is then used as a guide.
 
 Throughput is best thought of as a measurement in tasks per minute.
 
 A default {kib} instance can support up to `200/tpm`.
 
 [float]
-===== Automatic estimation
+==== Automatic estimation
 
 experimental[]
 
 Based on the information available...
 
 When evaluating the proposed {kib} instance number under `proposed.provisioned_kibana`...
 ============================================================================
 
 [float]
-===== Manual estimation
+==== Manual estimation
 
 By <>, you can make a rough estimate as to the required throughput as a _tasks per minute_ measurement.
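As a closing illustration of the manual estimation method described in the final hunks, here is a worked example. The workload mix is invented purely for illustration; the method itself, converting each recurring task's cadence into tasks per minute, summing, and comparing against the `200/tpm` a default {kib} instance provides, follows the documentation above.

[source,typescript]
--------------------------------------------------
// Hypothetical workload, for illustration only.
const workloads = [
  { name: 'alerting rules running every minute', count: 150, intervalSeconds: 60 },
  { name: 'connector actions per minute', count: 30, intervalSeconds: 60 },
  { name: 'hourly reports', count: 10, intervalSeconds: 3600 },
];

// Convert each cadence to tasks per minute and sum.
const requiredTpm = workloads.reduce(
  (sum, w) => sum + (w.count * 60) / w.intervalSeconds,
  0
);

const perInstanceTpm = 200; // default: capacity 10, polled every 3 seconds

console.log(requiredTpm.toFixed(2)); // "180.17", under one default instance
console.log(Math.ceil(requiredTpm / perInstanceTpm)); // 1 instance, as a lower bound
--------------------------------------------------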