Add target based scaling support for Netherite #265

Draft · wants to merge 6 commits into main
@@ -51,8 +51,8 @@
</ItemGroup>

<ItemGroup>
<PackageReference Include="Microsoft.Azure.DurableTask.Core" Version="2.11.1" />
<PackageReference Include="Microsoft.Azure.WebJobs.Extensions.DurableTask" Version="2.8.1" />
<PackageReference Include="Microsoft.Azure.DurableTask.Core" Version="2.13.*" />
<PackageReference Include="Microsoft.Azure.WebJobs.Extensions.DurableTask" Version="2.10.0" />
</ItemGroup>

<ItemGroup Condition=" '$(TargetFramework)' != 'netcoreapp2.2' ">
29 changes: 26 additions & 3 deletions src/DurableTask.Netherite.AzureFunctions/NetheriteProvider.cs
@@ -119,7 +119,7 @@ public override bool TryGetScaleMonitor(
{
if (this.Service.TryGetScalingMonitor(out var monitor))
{
scaleMonitor = new ScaleMonitor(monitor);
scaleMonitor = new ScaleMonitor(monitor, functionId);
monitor.InformationTracer($"ScaleMonitor Constructed, Descriptor.Id={scaleMonitor.Descriptor.Id}");
return true;
}
@@ -130,6 +130,23 @@
}
}

#if !NETSTANDARD
public override bool TryGetTargetScaler(
string functionId,
string functionName,
string hubName,
string connectionName,
out ITargetScaler targetScaler)
{
ILoadPublisherService loadPublisher = this.Service.GetLoadPublisher();
NetheriteMetricsProvider metricsProvider = this.Service.GetNetheriteMetricsProvider(loadPublisher, this.Settings.EventHubsConnection);

targetScaler = new NetheriteTargetScaler(functionId, metricsProvider, this);

return true;
}
#endif

public class NetheriteScaleMetrics : ScaleMetrics
{
public byte[] Metrics { get; set; }
@@ -142,10 +159,16 @@ class ScaleMonitor : IScaleMonitor<NetheriteScaleMetrics>
readonly DataContractSerializer serializer = new DataContractSerializer(typeof(ScalingMonitor.Metrics));
static Tuple<DateTime, NetheriteScaleMetrics> cachedMetrics;

public ScaleMonitor(ScalingMonitor scalingMonitor)
public ScaleMonitor(ScalingMonitor scalingMonitor, string functionId)
{
this.scalingMonitor = scalingMonitor;
this.descriptor = new ScaleMonitorDescriptor($"DurableTaskTrigger-Netherite-{this.scalingMonitor.TaskHubName}".ToLower());
var descriptorId = $"DurableTaskTrigger-Netherite-{this.scalingMonitor.TaskHubName}".ToLower();

#if NETCOREAPP3_1_OR_GREATER
this.descriptor = new ScaleMonitorDescriptor(descriptorId, functionId);
#else
this.descriptor = new ScaleMonitorDescriptor(descriptorId);
#endif
}

public ScaleMonitorDescriptor Descriptor => this.descriptor;
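For reviewers, a minimal sketch (not part of this diff) of how a scale host could drive the new TryGetTargetScaler override; the provider variable and the four id strings below are placeholders, not names from this PR:

```csharp
// Hypothetical consumption of the new override. "provider" stands in for the
// NetheriteProvider instance that the Durable Functions extension creates.
if (provider.TryGetTargetScaler("functionId", "functionName", "hubName", "connectionName", out ITargetScaler targetScaler))
{
    // The scale controller polls this result periodically to decide how many workers to run.
    TargetScalerResult result = await targetScaler.GetScaleResultAsync(new TargetScalerContext());
    Console.WriteLine($"requested worker count: {result.TargetWorkerCount}");
}
```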
111 changes: 111 additions & 0 deletions src/DurableTask.Netherite.AzureFunctions/NetheriteTargetScaler.cs
@@ -0,0 +1,111 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#if !NETSTANDARD
#if !NETCOREAPP2_2
namespace DurableTask.Netherite.AzureFunctions
{
using System;
using System.Linq;
using System.Threading.Tasks;
using DurableTask.Netherite.Scaling;
using Microsoft.Azure.WebJobs.Extensions.DurableTask;
using Microsoft.Azure.WebJobs.Host.Scale;
using static DurableTask.Netherite.Scaling.ScalingMonitor;

public class NetheriteTargetScaler : ITargetScaler
{
readonly NetheriteMetricsProvider metricsProvider;
readonly DurabilityProvider durabilityProvider;
readonly TargetScalerResult scaleResult;

public NetheriteTargetScaler(
string functionId,
NetheriteMetricsProvider metricsProvider,
DurabilityProvider durabilityProvider)
{
this.metricsProvider = metricsProvider;
this.durabilityProvider = durabilityProvider;
this.scaleResult = new TargetScalerResult();
this.TargetScalerDescriptor = new TargetScalerDescriptor(functionId);
}

public TargetScalerDescriptor TargetScalerDescriptor { get; private set; }

public async Task<TargetScalerResult> GetScaleResultAsync(TargetScalerContext context)
{
Metrics metrics = await this.metricsProvider.GetMetricsAsync();

int maxConcurrentActivities = this.durabilityProvider.MaxConcurrentTaskActivityWorkItems;
int maxConcurrentWorkItems = this.durabilityProvider.MaxConcurrentTaskOrchestrationWorkItems;

int target;

if (metrics.TaskHubIsIdle)
{
// the task hub is idle: we need no workers
this.scaleResult.TargetWorkerCount = 0;
return this.scaleResult;
}

target = 1; // always need at least one worker when we are not idle

// if there is a backlog of activities, ask for enough workers to process them
int activities = metrics.LoadInformation.Where(info => info.Value.IsLoaded()).Sum(info => info.Value.Activities);
if (activities > 0)
{
int requestedWorkers = (activities / maxConcurrentActivities) + 1;
requestedWorkers = Math.Min(requestedWorkers, metrics.LoadInformation.Count); // cannot use more workers than partitions
target = Math.Max(target, requestedWorkers);
}

// if there are load-challenged partitions, ask for a worker for each of them
int numberOfChallengedPartitions = metrics.LoadInformation.Values
.Count(info => info.IsLoaded() || info.WorkItems > maxConcurrentWorkItems);
target = Math.Max(target, numberOfChallengedPartitions);

// Determine how many different workers are currently running
int current = metrics.LoadInformation.Values.Select(info => info.WorkerId).Distinct().Count();

if (target < current)
{
// the target is lower than our current scale. However, before
// scaling in, we check some more things to avoid
// over-aggressive scale-in that could impact performance negatively.

int numberOfNonIdlePartitions = metrics.LoadInformation.Values.Count(info => !PartitionLoadInfo.IsLongIdle(info.LatencyTrend));
if (current > numberOfNonIdlePartitions)
{
// if we have more workers than non-idle partitions, don't immediately go lower than
// the number of non-idle partitions.
target = Math.Max(target, numberOfNonIdlePartitions);
}
else
{
// All partitions are busy, so we don't want to reduce the worker count unless load is very low.
// Even if all partitions are running efficiently, it can be hard to know whether it is wise to reduce the worker count.
// We want to avoid scaling in unnecessarily when we've reached optimal scale-out.
// But we also want to avoid the case where a constant trickle of load after a big scale-out prevents scaling back in.
// To balance these goals, we vote to scale down only by one worker at a time when we see this situation.
bool allPartitionsAreFast = !metrics.LoadInformation.Values.Any(info =>
info.LatencyTrend.Length != PartitionLoadInfo.LatencyTrendLength
|| info.LatencyTrend.Any(c => c == PartitionLoadInfo.MediumLatency || c == PartitionLoadInfo.HighLatency));

if (allPartitionsAreFast)
{
// don't go lower than 1 below current
target = Math.Max(target, current - 1);
}
else
{
// don't go lower than current
target = Math.Max(target, current);
}
}
}

this.scaleResult.TargetWorkerCount = target;
return this.scaleResult;
}
}
}
#endif
#endif
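To make the backlog heuristic in GetScaleResultAsync concrete, here is a small standalone sketch with assumed numbers (250 queued activities, 10 concurrent activities per worker, 12 partitions); none of these values come from the PR:

```csharp
// Illustrative arithmetic only, mirroring the backlog branch above.
int activities = 250;              // total queued activities across partitions
int maxConcurrentActivities = 10;  // per-worker activity concurrency
int partitionCount = 12;           // one worker per partition is the upper bound

int requestedWorkers = (activities / maxConcurrentActivities) + 1; // 26
requestedWorkers = Math.Min(requestedWorkers, partitionCount);     // capped at 12
Console.WriteLine(requestedWorkers);                               // prints 12
```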
2 changes: 1 addition & 1 deletion src/DurableTask.Netherite/DurableTask.Netherite.csproj
@@ -57,7 +57,7 @@
<PackageReference Include="Microsoft.Azure.Storage.Blob" Version="11.2.3" />
<PackageReference Include="Azure.Storage.Blobs" Version="12.13.0" />
<PackageReference Include="Microsoft.FASTER.Core" Version="2.0.16" />
<PackageReference Include="Microsoft.Azure.DurableTask.Core" Version="2.11.1" />
<PackageReference Include="Microsoft.Azure.DurableTask.Core" Version="2.13.*" />
<PackageReference Include="Newtonsoft.Json" Version="13.0.1" />
<PackageReference Include="Microsoft.SourceLink.GitHub" Version="1.1.1" PrivateAssets="All" />
<PackageReference Include="System.Threading.Channels" Version="4.7.1" />
@@ -212,9 +212,9 @@ public bool TryGetScalingMonitor(out ScalingMonitor monitor)
{
try
{
ILoadPublisherService loadPublisher = string.IsNullOrEmpty(this.Settings.LoadInformationAzureTableName) ?
new AzureBlobLoadPublisher(this.Settings.BlobStorageConnection, this.Settings.HubName)
: new AzureTableLoadPublisher(this.Settings.TableStorageConnection, this.Settings.LoadInformationAzureTableName, this.Settings.HubName);
ILoadPublisherService loadPublisher = this.GetLoadPublisher();

NetheriteMetricsProvider netheriteMetricsProvider = this.GetNetheriteMetricsProvider(loadPublisher, this.Settings.EventHubsConnection);

monitor = new ScalingMonitor(
loadPublisher,
@@ -223,7 +223,8 @@ public bool TryGetScalingMonitor(out ScalingMonitor monitor)
this.Settings.HubName,
this.TraceHelper.TraceScaleRecommendation,
this.TraceHelper.TraceProgress,
this.TraceHelper.TraceError);
this.TraceHelper.TraceError,
netheriteMetricsProvider);

return true;
}
@@ -237,6 +238,17 @@ public bool TryGetScalingMonitor(out ScalingMonitor monitor)
return false;
}

internal ILoadPublisherService GetLoadPublisher()
{
return string.IsNullOrEmpty(this.Settings.LoadInformationAzureTableName) ?
new AzureBlobLoadPublisher(this.Settings.BlobStorageConnection, this.Settings.HubName)
: new AzureTableLoadPublisher(this.Settings.TableStorageConnection, this.Settings.LoadInformationAzureTableName, this.Settings.HubName);
}

internal NetheriteMetricsProvider GetNetheriteMetricsProvider(ILoadPublisherService loadPublisher, ConnectionInfo eventHubsConnection)
{
return new NetheriteMetricsProvider(loadPublisher, eventHubsConnection);
}

public void WatchThreads(object _)
{
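The net effect of this refactoring is that the legacy scale monitor and the new target scaler share one metrics pipeline. A hedged sketch of the composition, assuming code running inside the Netherite assembly (the service and settings variables are placeholders, and both Get* helpers are internal):

```csharp
// Hypothetical wiring: both scaling paths read the same published load information.
ILoadPublisherService loadPublisher = service.GetLoadPublisher();
NetheriteMetricsProvider metricsProvider = service.GetNetheriteMetricsProvider(loadPublisher, settings.EventHubsConnection);
ScalingMonitor.Metrics metrics = await metricsProvider.GetMetricsAsync();
```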
95 changes: 95 additions & 0 deletions src/DurableTask.Netherite/Scaling/NetheriteMetricsProvider.cs
@@ -0,0 +1,95 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

namespace DurableTask.Netherite.Scaling
{
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using DurableTask.Netherite.EventHubsTransport;
using static DurableTask.Netherite.Scaling.ScalingMonitor;

public class NetheriteMetricsProvider
{
readonly ILoadPublisherService loadPublisher;
readonly ConnectionInfo eventHubsConnection;

public NetheriteMetricsProvider(
ILoadPublisherService loadPublisher,
ConnectionInfo eventHubsConnection)
{
this.loadPublisher = loadPublisher;
this.eventHubsConnection = eventHubsConnection;
}

public virtual async Task<Metrics> GetMetricsAsync()
{
DateTime now = DateTime.UtcNow;
var loadInformation = await this.loadPublisher.QueryAsync(CancellationToken.None).ConfigureAwait(false);
var busy = await this.TaskHubIsIdleAsync(loadInformation).ConfigureAwait(false);

return new Metrics()
{
LoadInformation = loadInformation,
Busy = busy,
Timestamp = now,
};
}

/// <summary>
/// Determines whether the task hub is idle, based on the load information for the partitions and on the Event Hubs queue positions.
/// </summary>
/// <param name="loadInformation">The most recently published load information, keyed by partition.</param>
/// <returns>null if the task hub is idle, or a string describing the current non-idle state.</returns>
public async Task<string> TaskHubIsIdleAsync(Dictionary<uint, PartitionLoadInfo> loadInformation)
{
// first, check if any of the partitions have queued work or are scheduled to wake up
foreach (var kvp in loadInformation)
{
string busy = kvp.Value.IsBusy();
if (!string.IsNullOrEmpty(busy))
{
return $"P{kvp.Key:D2} {busy}";
}
}

// next, check if any of the entries are not current, in the sense that their input queue position
// does not match the latest queue position


List<long> positions = await Netherite.EventHubsTransport.EventHubsConnections.GetQueuePositionsAsync(this.eventHubsConnection, EventHubsTransport.PartitionHub).ConfigureAwait(false);

if (positions == null)
{
return "eventhubs is missing";
}

for (int i = 0; i < positions.Count; i++)
{
if (!loadInformation.TryGetValue((uint)i, out var loadInfo))
{
return $"P{i:D2} has no load information published yet";
}
if (positions[i] > loadInfo.InputQueuePosition)
{
return $"P{i:D2} has input queue position {loadInfo.InputQueuePosition} which is {positions[(int)i] - loadInfo.InputQueuePosition} behind latest position {positions[i]}";
}
}

// finally, check if we have waited long enough
foreach (var kvp in loadInformation)
{
string latencyTrend = kvp.Value.LatencyTrend;

if (!PartitionLoadInfo.IsLongIdle(latencyTrend))
{
return $"P{kvp.Key:D2} had some activity recently, latency trend is {latencyTrend}";
}
}

// we have concluded that there are no pending work items, timers, or unprocessed input queue entries
return null;
}
}
}
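A short usage sketch of the idle check; the metricsProvider and loadInformation variables are placeholders. The contract is that TaskHubIsIdleAsync returns null when the task hub is idle, and otherwise a human-readable reason why it is not:

```csharp
// Hypothetical caller; loadInformation would come from loadPublisher.QueryAsync(...).
string busyReason = await metricsProvider.TaskHubIsIdleAsync(loadInformation);
if (busyReason == null)
{
    Console.WriteLine("task hub is idle; the target worker count may drop to zero");
}
else
{
    Console.WriteLine($"task hub is busy: {busyReason}");
}
```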