From 163713d80ca71695d8e6f4141afa94e413d5e1c0 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 18 Mar 2024 17:51:45 -0700 Subject: [PATCH 001/131] define batch scrape definition model --- .../Model/Metrics/BatchScrapeDefinition.cs | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs new file mode 100644 index 000000000..3f55d87f9 --- /dev/null +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -0,0 +1,80 @@ +using System; +using System.Collections.Generic; +using GuardNet; +using Promitor.Core.Contracts; + +namespace Promitor.Core.Scraping.Configuration.Model.Metrics +{ + /// + /// Defines a batch of ScrapeDefinitions to be executed in a single request + /// Scrape definitions within a batch should share + /// 1. The same resource type + /// 2. The same Azure metric scrape target with identical dimensions + /// 3. The same time granularity + /// + public class BatchScrapeDefinition where TResourceDefinition : class, IAzureResourceDefinition + { + /// + /// Creates a new instance of the class. + /// + /// Configuration about the Azure Monitor metric to scrape + /// Configuration about the Azure Monitor metric to scrape + /// The scraping model. + /// Specify a subscription to scrape that defers from the default subscription. + /// + /// The name of the resource group containing the resource to scrape. This should contain the global + /// resource group name if none is overridden at the resource level. 
+ /// + public BatchScrapeDefinition( + List> groupedScrapeDefinitions, + AzureMetricConfiguration azureMetricConfiguration, + Scraping scraping, + TimeSpan aggregationInterval, + string subscriptionId, + string resourceGroupName) + { + Guard.NotNull(groupedScrapeDefinitions, nameof(groupedScrapeDefinitions)); + Guard.NotNull(azureMetricConfiguration, nameof(azureMetricConfiguration)); + Guard.NotNull(scraping, nameof(scraping)); + Guard.NotNull(subscriptionId, nameof(subscriptionId)); + Guard.NotNull(resourceGroupName, nameof(resourceGroupName)); + ScrapeDefinitions = groupedScrapeDefinitions; + AggregationInterval = aggregationInterval; + Scraping = scraping; + SubscriptionId = subscriptionId; + ResourceGroupName = resourceGroupName; + } + + /// + /// A batch of scrape job definitions to be executed as a single request + /// + public List> ScrapeDefinitions { get; set; } = new List>(); + + /// + /// Configuration about the Azure Monitor metric to scrape + /// + public AzureMetricConfiguration AzureMetricConfiguration { get; } + + + /// + /// The scraping model. + /// + public Scraping Scraping { get; } + + /// + /// The Azure subscription to get the metric from. This should be used instead of using + /// the SubscriptionId from because this property will contain + /// the global subscription id if none is overridden at the resource level. + /// + public string SubscriptionId { get; } + + /// + /// The Azure resource group to get the metric from. This should be used instead of using + /// the ResourceGroupName from because this property will contain + /// the global resource group name if none is overridden at the resource level. 
+ /// + public string ResourceGroupName { get; } + + public TimeSpan AggregationInterval{ get; } + } +} \ No newline at end of file From d0f4c734d4ea306e41deea63c11b86d53229d74f Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 19 Mar 2024 16:55:48 -0700 Subject: [PATCH 002/131] implement skeleton batch scraping flow --- .../Scheduling/ResourcesScrapingJob.cs | 32 +++++++++++++++++-- .../Model/MetricBatchScrapeConfig.cs | 8 +++++ .../Model/Metrics/BatchScrapeDefinition.cs | 12 +++---- .../Configuration/Model/MetricsDeclaration.cs | 1 + .../Interfaces/IScraper.cs | 1 + src/Promitor.Core.Scraping/Scraper.cs | 9 ++++++ 6 files changed, 54 insertions(+), 9 deletions(-) create mode 100644 src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 091a87275..365362dae 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -11,9 +11,11 @@ using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; using Promitor.Agents.Scraper.Discovery.Interfaces; +using Promitor.Core; using Promitor.Core.Contracts; using Promitor.Core.Metrics.Interfaces; using Promitor.Core.Metrics.Sinks; +using Promitor.Core.Scraping; using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Scraping.Configuration.Model.Metrics; using Promitor.Core.Scraping.Factories; @@ -133,7 +135,6 @@ public async Task ExecuteAsync(CancellationToken cancellationToken) try { var scrapeDefinitions = await GetAllScrapeDefinitions(cancellationToken); - await ScrapeMetrics(scrapeDefinitions, cancellationToken); } catch (OperationCanceledException) @@ -250,8 +251,19 @@ private void GetResourceScrapeDefinition(IAzureResourceDefinition resourceDefini } private async Task ScrapeMetrics(IEnumerable> scrapeDefinitions, CancellationToken cancellationToken) 
- { + { var tasks = new List(); + var batchScrapingEnabled = this._metricsDeclaration.MetricBatchConfig?.Enabled ?? false; + if (batchScrapingEnabled) { + var batchScrapeDefinitions = groupScrapeDefinitions(scrapeDefinitions, this._metricsDeclaration.MetricBatchConfig.MaxBatchSize, cancellationToken); + + foreach(var batchScrapeDefinition in batchScrapeDefinitions) { + var azureMetricName = batchScrapeDefinition.AzureMetricConfiguration.MetricName; + var resourceType = batchScrapeDefinition.ResourceType; + Logger.LogInformation("Batch scraping Azure Metric {AzureMetricName} for resource type {ResourceType}.", azureMetricName, resourceType); + await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); + } + } foreach (var scrapeDefinition in scrapeDefinitions) { @@ -266,6 +278,9 @@ private async Task ScrapeMetrics(IEnumerable batchScrapeDefinition) { + + } private async Task ScrapeMetric(ScrapeDefinition scrapeDefinition) { @@ -284,6 +299,7 @@ private async Task ScrapeMetric(ScrapeDefinition scrap var logAnalyticsClient = new LogAnalyticsClient(_loggerFactory, _metricsDeclaration.AzureMetadata.Cloud, tokenCredential); var scraper = _metricScraperFactory.CreateScraper(scrapeDefinition.Resource.ResourceType, _metricSinkWriter, _azureScrapingSystemMetricsPublisher, azureMonitorClient, logAnalyticsClient); + await scraper.ScrapeAsync(scrapeDefinition); } catch (Exception ex) @@ -292,6 +308,18 @@ private async Task ScrapeMetric(ScrapeDefinition scrap scrapeDefinition.PrometheusMetricDefinition.Name, scrapeDefinition.Resource.ResourceName); } } + + /// + /// groups scrape definitions based on following conditions: + /// 1. Definitions in a batch must target the same resource type + /// 2. Definitions in a batch must target the same Azure metric with identical dimensions + /// 3. Definitions in a batch must have the same time granularity + /// 4. 
Batch size cannot exceed configured maximum + /// + private List> groupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) + { + return null; + } /// /// Run some task work in the thread pool, but only allow a limited number of threads to go at a time diff --git a/src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs b/src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs new file mode 100644 index 000000000..9103dde1b --- /dev/null +++ b/src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs @@ -0,0 +1,8 @@ +namespace Promitor.Core.Scraping.Configuration.Model +{ + public class MetricBatchScrapeConfig + { + public bool Enabled { get; set; } + public int MaxBatchSize { get; set; } + } +} \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs index 3f55d87f9..fbb00a0f0 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -31,7 +31,7 @@ public BatchScrapeDefinition( Scraping scraping, TimeSpan aggregationInterval, string subscriptionId, - string resourceGroupName) + ResourceType resourceType) { Guard.NotNull(groupedScrapeDefinitions, nameof(groupedScrapeDefinitions)); Guard.NotNull(azureMetricConfiguration, nameof(azureMetricConfiguration)); @@ -42,7 +42,7 @@ public BatchScrapeDefinition( AggregationInterval = aggregationInterval; Scraping = scraping; SubscriptionId = subscriptionId; - ResourceGroupName = resourceGroupName; + ResourceType = resourceType; } /// @@ -68,12 +68,10 @@ public BatchScrapeDefinition( /// public string SubscriptionId { get; } - /// - /// The Azure resource group to get the metric from. 
This should be used instead of using - /// the ResourceGroupName from because this property will contain - /// the global resource group name if none is overridden at the resource level. + /// + /// The Azure resource type shared by all scrape definitions in the batch /// - public string ResourceGroupName { get; } + public ResourceType ResourceType { get; } public TimeSpan AggregationInterval{ get; } } diff --git a/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs b/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs index 3c820975f..b566e0f21 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs @@ -6,6 +6,7 @@ namespace Promitor.Core.Scraping.Configuration.Model public class MetricsDeclaration { public AzureMetadata AzureMetadata { get; set; } + public MetricBatchScrapeConfig? MetricBatchConfig { get; set; } public MetricDefaults MetricDefaults { get; set; } = new MetricDefaults(); public List Metrics { get; set; } = new List(); } diff --git a/src/Promitor.Core.Scraping/Interfaces/IScraper.cs b/src/Promitor.Core.Scraping/Interfaces/IScraper.cs index 420957123..87b321c69 100644 --- a/src/Promitor.Core.Scraping/Interfaces/IScraper.cs +++ b/src/Promitor.Core.Scraping/Interfaces/IScraper.cs @@ -7,5 +7,6 @@ namespace Promitor.Core.Scraping.Interfaces public interface IScraper where TResourceDefinition : class, IAzureResourceDefinition { Task ScrapeAsync(ScrapeDefinition scrapeDefinition); + Task BatchScrapeAsync(BatchScrapeDefinition batchScrapeDefinition); } } \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index 09be2c757..b9f2306d2 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -103,6 +103,15 @@ public async Task ScrapeAsync(ScrapeDefinition scrapeD } } + public async Task 
BatchScrapeAsync(BatchScrapeDefinition batchScrapeDefinition) + { // TODO for batch scrape logic: + // one request for all resources within the batch + // process batched result + // would the large volume of JSON be an issue? + // record telemetry + return null; + } + private const string ScrapeSuccessfulMetricDescription = "Provides an indication that the scraping of the resource was successful"; private const string ScrapeErrorMetricDescription = "Provides an indication that the scraping of the resource has failed"; From 2552197fc42c7ee533620bce2404f5b476c384b1 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 19 Mar 2024 17:25:53 -0700 Subject: [PATCH 003/131] complete mapping logic between YAML and scraper domain --- .../v1/Mapping/V1MappingProfile.cs | 1 + .../v1/Model/MetricBatchScrapeConfigV1.cs | 21 +++++++++++++++++++ .../v1/Model/MetricsDeclarationV1.cs | 2 ++ 3 files changed, 24 insertions(+) create mode 100644 src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs diff --git a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs index 9ddea7e2c..4d4d57270 100644 --- a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs +++ b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs @@ -14,6 +14,7 @@ public V1MappingProfile() { CreateMap(); CreateMap(); + CreateMap(); CreateMap(); CreateMap(); CreateMap(); diff --git a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs new file mode 100644 index 000000000..389b4cace --- /dev/null +++ b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs @@ -0,0 +1,21 @@ +using System; +using 
Microsoft.Azure.Management.Monitor.Fluent.Models; + +namespace Promitor.Core.Scraping.Configuration.Serialization.v1.Model +{ + /// + /// Contains settings to scrape metrics in batched API calls + /// + public class MetricBatchScrapeConfigV1 + { + /// + /// Enable batched scraping mode for all metrics in the scraper + /// + public bool Enabled { get; set; } + + /// + /// Maximum number of resources in a batch. Azure Monitor API specifies a max limit of 50 as of March 2024 + /// + public int MaxBatchSize { get; set; } + } +} diff --git a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs index 4c4e80082..df011517f 100644 --- a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs +++ b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Serialization.Enum; namespace Promitor.Core.Scraping.Configuration.Serialization.v1.Model @@ -10,6 +11,7 @@ public class MetricsDeclarationV1 { public string Version { get; set; } = SpecVersion.v1.ToString(); public AzureMetadataV1 AzureMetadata { get; set; } + public MetricBatchScrapeConfigV1? 
MetricBatchScrapeConfig { get; set; } public MetricDefaultsV1 MetricDefaults { get; set; } public IReadOnlyCollection Metrics { get; set; } } From 975eef7c69ebaeee1a7aeb67b73dbedffb1d4df2 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 20 Mar 2024 17:15:11 -0700 Subject: [PATCH 004/131] start grouping logic implementation via compound keys --- .../Scheduling/ResourcesScrapingJob.cs | 11 ++++- .../Model/Metrics/BatchScrapeDefinition.cs | 43 +++---------------- .../Model/Metrics/ScrapeDefinition.cs | 9 ++++ 3 files changed, 23 insertions(+), 40 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 365362dae..f0845f0fb 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -255,7 +255,7 @@ private async Task ScrapeMetrics(IEnumerable(); var batchScrapingEnabled = this._metricsDeclaration.MetricBatchConfig?.Enabled ?? false; if (batchScrapingEnabled) { - var batchScrapeDefinitions = groupScrapeDefinitions(scrapeDefinitions, this._metricsDeclaration.MetricBatchConfig.MaxBatchSize, cancellationToken); + var batchScrapeDefinitions = GroupScrapeDefinitions(scrapeDefinitions, this._metricsDeclaration.MetricBatchConfig.MaxBatchSize, cancellationToken); foreach(var batchScrapeDefinition in batchScrapeDefinitions) { var azureMetricName = batchScrapeDefinition.AzureMetricConfiguration.MetricName; @@ -316,11 +316,18 @@ private async Task ScrapeMetric(ScrapeDefinition scrap /// 3. Definitions in a batch must have the same time granularity /// 4. 
Batch size cannot exceed configured maximum /// - private List> groupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) + private List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) { + // first pass to build batches that could exceed max + Dictionary> groupedItems = items.GroupBy(item => CalculateCompoundKey(item)) + .ToDictionary(group => group.Key, group => group.ToList()); + // split to key: List + + // flatten to List return null; } + /// /// Run some task work in the thread pool, but only allow a limited number of threads to go at a time /// (unless max degree of parallelism wasn't configured, in which case mutex is null and no limit is imposed). diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs index fbb00a0f0..d745eb641 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -25,24 +25,13 @@ public class BatchScrapeDefinition where TResourceDefinitio /// The name of the resource group containing the resource to scrape. This should contain the global /// resource group name if none is overridden at the resource level. 
/// - public BatchScrapeDefinition( - List> groupedScrapeDefinitions, - AzureMetricConfiguration azureMetricConfiguration, - Scraping scraping, - TimeSpan aggregationInterval, - string subscriptionId, - ResourceType resourceType) + public BatchScrapeDefinition(List> groupedScrapeDefinitions, ScrapeDefinitionBatchProperties scrapeDefinitionBatchProperties) { Guard.NotNull(groupedScrapeDefinitions, nameof(groupedScrapeDefinitions)); - Guard.NotNull(azureMetricConfiguration, nameof(azureMetricConfiguration)); - Guard.NotNull(scraping, nameof(scraping)); - Guard.NotNull(subscriptionId, nameof(subscriptionId)); - Guard.NotNull(resourceGroupName, nameof(resourceGroupName)); + Guard.NotNull(groupedScrapeDefinitions, nameof(scrapeDefinitionBatchProperties)); + ScrapeDefinitions = groupedScrapeDefinitions; - AggregationInterval = aggregationInterval; - Scraping = scraping; - SubscriptionId = subscriptionId; - ResourceType = resourceType; + ScrapeDefinitionBatchProperties = scrapeDefinitionBatchProperties; } /// @@ -50,29 +39,7 @@ public BatchScrapeDefinition( /// public List> ScrapeDefinitions { get; set; } = new List>(); - /// - /// Configuration about the Azure Monitor metric to scrape - /// - public AzureMetricConfiguration AzureMetricConfiguration { get; } - - - /// - /// The scraping model. - /// - public Scraping Scraping { get; } - - /// - /// The Azure subscription to get the metric from. This should be used instead of using - /// the SubscriptionId from because this property will contain - /// the global subscription id if none is overridden at the resource level. 
- /// - public string SubscriptionId { get; } - - /// - /// The Azure resource type shared by all scrape definitions in the batch - /// - public ResourceType ResourceType { get; } + public ScrapeDefinitionBatchProperties ScrapeDefinitionBatchProperties { get; set; } - public TimeSpan AggregationInterval{ get; } } } \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs index 2d743d222..21c3552af 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs @@ -94,5 +94,14 @@ public ScrapeDefinition( } return AzureMetricConfiguration?.Aggregation?.Interval; } + + public ScrapeDefinitionBatchProperties buildPropertiesForBatch() { + return new ScrapeDefinitionBatchProperties( + this.AzureMetricConfiguration, + this.SubscriptionId, + this.Resource.ResourceType, + this.GetAggregationInterval + ); + } } } From 40289da0e0f1f1ab02060be198bda565450d5243 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 21 Mar 2024 12:16:21 -0700 Subject: [PATCH 005/131] flesh out definiion batching logic --- .../Scheduling/ResourcesScrapingJob.cs | 26 +++++++--- .../Model/Metrics/BatchScrapeDefinition.cs | 1 - .../Model/Metrics/ScrapeDefinitionBatch.cs | 50 +++++++++++++++++++ 3 files changed, 70 insertions(+), 7 deletions(-) create mode 100644 src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index f0845f0fb..2fdb4ef91 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -11,6 +11,7 @@ using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; using 
Promitor.Agents.Scraper.Discovery.Interfaces; +using Promitor.Agents.Scraper.Validation.MetricDefinitions.ResourceTypes; using Promitor.Core; using Promitor.Core.Contracts; using Promitor.Core.Metrics.Interfaces; @@ -19,6 +20,7 @@ using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Scraping.Configuration.Model.Metrics; using Promitor.Core.Scraping.Factories; +using Promitor.Core.Scraping.ResourceTypes; using Promitor.Integrations.Azure.Authentication; using Promitor.Integrations.AzureMonitor.Configuration; using Promitor.Integrations.LogAnalytics; @@ -318,13 +320,25 @@ private async Task ScrapeMetric(ScrapeDefinition scrap /// private List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) { - // first pass to build batches that could exceed max - Dictionary> groupedItems = items.GroupBy(item => CalculateCompoundKey(item)) - .ToDictionary(group => group.Key, group => group.ToList()); - // split to key: List + + Dictionary>> groupedScrapeDefinitions = allScrapeDefinitions.GroupBy(def => def.buildPropertiesForBatch()) + .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max + .ToDictionary(group => group.Key, SplitScrapeDefinitionBatch(group.Value)) // split to right-sized batches + .SelectMany(group => group.Value.SelectMany(batch => new BatchScrapeDefinition(batch, group.Key))); // flatten + return groupedScrapeDefinitions; + } + + /// + /// splits the "raw" batch according to max batch size configured + /// + private List>> SplitScrapeDefinitionBatch(List batchToSplit, int maxBatchSize, CancellationToken cancellationToken) + { + int numNewGroups = (batchToSplit.Count - 1) / 50 + 1; - // flatten to List - return null; + // Distribute items to new groups + return Enumerable.Range(0, batchToSplit) + .Select(i => batchToSplit.Skip(i * 50).Take(50).ToList()) + .ToList(); } diff --git 
a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs index d745eb641..753dcea5e 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -40,6 +40,5 @@ public BatchScrapeDefinition(List> grouped public List> ScrapeDefinitions { get; set; } = new List>(); public ScrapeDefinitionBatchProperties ScrapeDefinitionBatchProperties { get; set; } - } } \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs new file mode 100644 index 000000000..623c8b0d9 --- /dev/null +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -0,0 +1,50 @@ +using Promitor.Core.Contracts; + +namespace Promitor.Core.Scraping.Configuration.Model.Metrics +{ + /// + /// Defines properties of a batch of scrape definitions + /// + public class ScrapeDefinitionBatchProperties + { + + } + + /// + /// Configuration about the Azure Monitor metric to scrape + /// + public AzureMetricConfiguration AzureMetricConfiguration { get; } + + + /// + /// The scraping model. + /// + public Scraping Scraping { get; } + + /// + /// The Azure subscription to get the metric from. This should be used instead of using + /// the SubscriptionId from because this property will contain + /// the global subscription id if none is overridden at the resource level. 
+ /// + public string SubscriptionId { get; } + + /// + /// The Azure resource type shared by all scrape definitions in the batch + /// + public ResourceType ResourceType { get; } + + public TimeSpan AggregationInterval{ get; } + + public override int GetHashCode() + { + return this.BuildBatchHashKey().GetHashCode(); + } + + /// + /// Builds a namespaced string key to satisfy batch restrictions + /// + private string BuildBatchHashKey() + { + return ""; + } +} \ No newline at end of file From e9e385a65d397edda796e9f2120830de96b4155e Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 22 Mar 2024 22:12:31 -0700 Subject: [PATCH 006/131] implement scraper batch function + hashcode/equals for grouping --- .../Scheduling/ResourcesScrapingJob.cs | 20 +++++- .../Model/AzureMetricConfiguration.cs | 15 ++++ .../Model/Metrics/ScrapeDefinitionBatch.cs | 21 +++++- src/Promitor.Core.Scraping/Scraper.cs | 70 +++++++++++++++++-- 4 files changed, 115 insertions(+), 11 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 2fdb4ef91..67a62bb96 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -281,7 +281,26 @@ private async Task ScrapeMetrics(IEnumerable batchScrapeDefinition) { + try + { + var resourceSubscriptionId = batchScrapeDefinition.ScrapeDefinitionBatchProperties.SubscriptionId; + var azureMonitorClient = _azureMonitorClientFactory.CreateIfNotExists(_metricsDeclaration.AzureMetadata.Cloud, _metricsDeclaration.AzureMetadata.TenantId, + resourceSubscriptionId, _metricSinkWriter, _azureScrapingSystemMetricsPublisher, _resourceMetricDefinitionMemoryCache, _configuration, + _azureMonitorIntegrationConfiguration, _azureMonitorLoggingConfiguration, _loggerFactory); + var tokenCredential = 
AzureAuthenticationFactory.GetTokenCredential(_metricsDeclaration.AzureMetadata.Cloud.ManagementEndpoint, _metricsDeclaration.AzureMetadata.TenantId, + AzureAuthenticationFactory.GetConfiguredAzureAuthentication(_configuration), new Uri(_metricsDeclaration.AzureMetadata.Cloud.AuthenticationEndpoint)); + var logAnalyticsClient = new LogAnalyticsClient(_loggerFactory, _metricsDeclaration.AzureMetadata.Cloud, tokenCredential); + + var scraper = _metricScraperFactory.CreateScraper(scrapeDefinition.Resource.ResourceType, _metricSinkWriter, _azureScrapingSystemMetricsPublisher, azureMonitorClient, logAnalyticsClient); + + await scraper.BatchScrapeAsync(batchScrapeDefinition); + } + catch (Exception ex) + { + Logger.LogError(ex, "Failed to scrape metric {MetricName} for resource batch {ResourceName}.", + scrapeDefinition.PrometheusMetricDefinition.Name, batchScrapeDefinition.ScrapeDefinitionBatchProperties); + } } private async Task ScrapeMetric(ScrapeDefinition scrapeDefinition) @@ -335,7 +354,6 @@ private List>> SplitScrapeD { int numNewGroups = (batchToSplit.Count - 1) / 50 + 1; - // Distribute items to new groups return Enumerable.Range(0, batchToSplit) .Select(i => batchToSplit.Skip(i * 50).Take(50).ToList()) .ToList(); diff --git a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs index f1bac2974..b513cfabc 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs @@ -45,5 +45,20 @@ public class AzureMetricConfiguration } return Dimensions?.Any(dimension => dimension.Name.Equals(dimensionName, StringComparison.InvariantCultureIgnoreCase)); } + + // A unique string to represent this Azure metric and its configured dimensions + public string ToUniqueStringRepresentation() + { + StringBuilder sb = new StringBuilder(); + sb.Append(Name); + + foreach (var 
dimension in Dimensions) + { + sb.Append("_"); + sb.Append(dimension.ToString()); + } + + return sb.ToString(); + } } } \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index 623c8b0d9..58ed40521 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -1,3 +1,5 @@ +using System; +using System.Collections.Generic; using Promitor.Core.Contracts; namespace Promitor.Core.Scraping.Configuration.Model.Metrics @@ -5,7 +7,7 @@ namespace Promitor.Core.Scraping.Configuration.Model.Metrics /// /// Defines properties of a batch of scrape definitions /// - public class ScrapeDefinitionBatchProperties + public class ScrapeDefinitionBatchProperties : IEquatable { } @@ -41,10 +43,23 @@ public override int GetHashCode() } /// - /// Builds a namespaced string key to satisfy batch restrictions + /// Builds a namespaced string key to satisfy batch restrictions, in the format of + /// ___ /// private string BuildBatchHashKey() { - return ""; + return string.Join("_", [List.ofAzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString()]); + } + + /// + /// Equality comparison override in case of hash collision + /// + public override bool Equals(object obj) + { + if (obj == null || !(obj is MyClass)) + return false; + + MyClass other = (MyClass)obj; + return this.Id == other.Id && this.Name == other.Name; } } \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index b9f2306d2..45eb55eb2 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -104,18 +104,59 @@ public async Task ScrapeAsync(ScrapeDefinition scrapeD } public async Task 
BatchScrapeAsync(BatchScrapeDefinition batchScrapeDefinition) - { // TODO for batch scrape logic: - // one request for all resources within the batch - // process batched result - // would the large volume of JSON be an issue? - // record telemetry + { + // would the large volume of JSON be an issue? Can it be handled by the SDK? + if (batchScrapeDefinition == null) + { + throw new ArgumentNullException(nameof(batchScrapeDefinition)); + } + + var aggregationInterval = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AggregationInterval; + if (aggregationInterval == null) + { + throw new ArgumentNullException(nameof(scrapeDefinition)); + } + + try + { + var aggregationType = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.Aggregation.Type; + var scrapeDefinitions = batchScrapeDefinition.ScrapeDefinitions; + var scrapedMetricResult = await BatchScrapeResourceAsync( + batchScrapeDefinition.ScrapeDefinitionBatchProperties.SubscriptionId, + batchScrapeDefinition, + aggregationType, + aggregationInterval.Value); + + foreach (int i in Enumerable.Range(0, scrapedMetricResult.Count)) + { + var scrapedMetricResult = scrapedMetricResult[i]; + var scrapeDefinition = scrapeDefinitions[i]; + LogMeasuredMetrics(scrapeDefinition, scrapedMetricResult, aggregationInterval); + + await _metricSinkWriter.ReportMetricAsync(scrapeDefinition.PrometheusMetricDefinition.Name, scrapeDefinition.PrometheusMetricDefinition.Description, scrapedMetricResult); + + await ReportScrapingOutcomeAsync(scrapeDefinition, isSuccessful: true, isBatchJob: true) ; + } + } + catch (ErrorResponseException errorResponseException) + { + HandleErrorResponseException(errorResponseException, scrapeDefinition.PrometheusMetricDefinition.Name); + + await ReportScrapingOutcomeAsync(scrapeDefinition, isSuccessful: false, isBatchJob: true); + } + catch (Exception exception) + { + Logger.LogCritical(exception, "Failed to scrape resource for metric '{MetricName}'", 
batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.MetricName); + + await ReportScrapingOutcomeAsync(scrapeDefinition, isSuccessful: false, isBatchJob: true); + } return null; } private const string ScrapeSuccessfulMetricDescription = "Provides an indication that the scraping of the resource was successful"; private const string ScrapeErrorMetricDescription = "Provides an indication that the scraping of the resource has failed"; - private async Task ReportScrapingOutcomeAsync(ScrapeDefinition scrapeDefinition, bool isSuccessful) + private async Task ReportScrapingOutcomeAsync(ScrapeDefinition scrapeDefinition, bool isSuccessful, bool isBatchJob = false) { // We reset all values, by default double successfulMetricValue = 0; @@ -138,7 +179,8 @@ private async Task ReportScrapingOutcomeAsync(ScrapeDefinition ScrapeResourceAsync( TResourceDefinition resourceDefinition, AggregationType aggregationType, TimeSpan aggregationInterval); + + /// + /// Scrapes configured resource batch. Should return telemetry for all scrape definitions as a list + /// + /// Metric subscription Id + /// Contains all scrape definitions in the batch and their shared properties(like resource type) + /// Contains the resource cast to the specific resource type. 
+ /// Aggregation for the metric to use + /// Interval that is used to aggregate metrics + protected abstract Task> BatchScrapeResourceAsync( + string subscriptionId, + BatchScrapeDefinition batchScrapeDefinition, + AggregationType aggregationType, + TimeSpan aggregationInterval); /// /// Builds the URI of the resource to scrape From d6fe4ffb3fdcc14034e57f40036ffaac0ef53f2e Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 22 Mar 2024 22:33:17 -0700 Subject: [PATCH 007/131] implement scraper batch function + hashcode/equals for grouping --- .../Configuration/Model/Metrics/ScrapeDefinitionBatch.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index 58ed40521..c924ca675 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -48,7 +48,7 @@ public override int GetHashCode() /// private string BuildBatchHashKey() { - return string.Join("_", [List.ofAzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString()]); + return string.Join("_", [List.ofAzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString]); } /// @@ -56,10 +56,10 @@ private string BuildBatchHashKey() /// public override bool Equals(object obj) { - if (obj == null || !(obj is MyClass)) + if (obj == null || !(obj is ScrapeDefinitionBatchProperties)) return false; - MyClass other = (MyClass)obj; - return this.Id == other.Id && this.Name == other.Name; + MyClass other = (ScrapeDefinitionBatchProperties)obj; + return this.ResourceType == other.ResourceType && this.AzureMetricConfiguration.ToUniqueStringRepresentation() == other.ToUniqueStringRepresentation() && 
this.SubscriptionId == other.SubscriptionId && this.AggregationInterval.Equals(other.AggregationDeserializer); } } \ No newline at end of file From 6647ef140460d8b5027112a4a7ab3c6b449ea29c Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 1 May 2024 20:09:31 -0700 Subject: [PATCH 008/131] github action image build --- .github/workflows/templates-build-push-image.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index 60a8380f8..bf457cfc8 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -1,5 +1,6 @@ +name: Build and Push (Linux) on: - workflow_call: + workflow_dispatch: inputs: image_name: required: true @@ -15,6 +16,8 @@ jobs: linux: name: Build & Push (Linux) runs-on: ubuntu-latest + permissions: + packages: write steps: - name: Checkout Code uses: actions/checkout@v4 @@ -40,7 +43,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: tomkerkhove + username: locmai password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -49,5 +52,5 @@ jobs: build-args: VERSION="${{ env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} - push: true + tags: ghcr.io/locmai/${{ env.image_commit_uri }},ghcr.io/locmai/${{ env.image_latest_uri }} + push: true \ No newline at end of file From 77640a7a1050b5a39c6972deac22e1b4fce751bf Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 1 May 2024 20:27:55 -0700 Subject: [PATCH 009/131] github action image build --- .github/workflows/templates-build-push-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index bf457cfc8..16f5d8440 100644 --- 
a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -43,7 +43,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: locmai + username: hkfgo password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -52,5 +52,5 @@ jobs: build-args: VERSION="${{ env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ghcr.io/locmai/${{ env.image_commit_uri }},ghcr.io/locmai/${{ env.image_latest_uri }} + tags: ghcr.io/hkfgo/${{ env.image_commit_uri }},ghcr.io/hkfgo/${{ env.image_latest_uri }} push: true \ No newline at end of file From 9e7c0cdda7c617c98d8665a2c47119c9eb276c3a Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 17 Jul 2024 14:49:57 -0700 Subject: [PATCH 010/131] code batch query client initialization --- .../Extensions/AzureCloudExtensions.cs | 22 +++++++++++- src/Promitor.Core/Promitor.Core.csproj | 2 +- .../AzureMonitorQueryClient.cs | 36 ++++++++++++++++++- .../LegacyAzureMonitorClient.cs | 6 ++++ .../Promitor.Integrations.AzureMonitor.csproj | 2 +- 5 files changed, 64 insertions(+), 4 deletions(-) diff --git a/src/Promitor.Core/Extensions/AzureCloudExtensions.cs b/src/Promitor.Core/Extensions/AzureCloudExtensions.cs index 6dc2a385a..0980bd118 100644 --- a/src/Promitor.Core/Extensions/AzureCloudExtensions.cs +++ b/src/Promitor.Core/Extensions/AzureCloudExtensions.cs @@ -31,7 +31,7 @@ public static AzureEnvironment GetAzureEnvironment(this AzureCloud azureCloud) } /// - /// Get Azure environment information under legacy SDK model + /// Get Azure environment information for Azure.Monitor SDK single resource queries /// /// Microsoft Azure cloud /// Azure environment information for specified cloud @@ -49,6 +49,26 @@ public static MetricsQueryAudience DetermineMetricsClientAudience(this AzureClou } } + /// + /// Get Azure environment information for Azure.Monitor SDK batch queries + /// + /// Microsoft Azure cloud + /// 
Azure environment information for specified cloud + public static MetricsClientAudience DetermineMetricsClientBatchQueryAudience(this AzureCloud azureCloud) { + switch (azureCloud) + { + case AzureCloud.Global: + return MetricsClientAudience.AzurePublicCloud; + case AzureCloud.UsGov: + return MetricsClientAudience.AzureGovernment; + case AzureCloud.China: + return MetricsClientAudience.AzureChina; + default: + throw new ArgumentOutOfRangeException(nameof(azureCloud), "No Azure environment is known for"); // Azure.Monitory.Query package does not support any other sovereign regions + } + } + + public static Uri GetAzureAuthorityHost(this AzureCloud azureCloud) { switch (azureCloud) diff --git a/src/Promitor.Core/Promitor.Core.csproj b/src/Promitor.Core/Promitor.Core.csproj index 38f35f074..4a4120d4d 100644 --- a/src/Promitor.Core/Promitor.Core.csproj +++ b/src/Promitor.Core/Promitor.Core.csproj @@ -14,7 +14,7 @@ - + diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 17c6754f8..fabb122cc 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -30,7 +30,8 @@ public class AzureMonitorQueryClient : IAzureMonitorClient { private readonly IOptions _azureMonitorIntegrationConfiguration; private readonly TimeSpan _metricDefinitionCacheDuration = TimeSpan.FromHours(1); - private readonly MetricsQueryClient _metricsQueryClient; + private readonly MetricsQueryClient _metricsQueryClient; // for single resource queries + private readonly MetricsClient _metricsBatchQueryClient; // for batch queries private readonly IMemoryCache _resourceMetricDefinitionMemoryCache; private readonly ILogger _logger; @@ -60,6 +61,7 @@ public AzureMonitorQueryClient(AzureCloud azureCloud, string tenantId, string su _azureMonitorIntegrationConfiguration = azureMonitorIntegrationConfiguration; _logger = 
loggerFactory.CreateLogger(); _metricsQueryClient = CreateAzureMonitorMetricsClient(azureCloud, tenantId, subscriptionId, azureAuthenticationInfo, metricSinkWriter, azureScrapingSystemMetricsPublisher, azureMonitorLoggingConfiguration); + _metricsBatchQueryClient = CreateAzureMonitorMetricsBatchClient(azureCloud, tenantId, azureAuthenticationInfo, azureMonitorLoggingConfiguration); } /// @@ -133,6 +135,13 @@ public async Task> QueryMetricAsync(string metricName, List return measuredMetrics; } + public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, + ListresourceIds, string metricFilter = null, int? metricLimit = null) + { + return null; + } + + private async Task> GetMetricDefinitionsAsync(string resourceId, string metricNamespace) { // Get cached metric definitions @@ -336,5 +345,30 @@ private MetricsQueryClient CreateAzureMonitorMetricsClient(AzureCloud azureCloud } return new MetricsQueryClient(tokenCredential, metricsQueryClientOptions); } + + /// + /// Creates authenticated client to query for metrics + /// + private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud, string tenantId, AzureAuthenticationInfo azureAuthenticationInfo, IOptions azureMonitorLoggingConfiguration) { + var metricsClientOptions = new MetricsClientOptions{ + Audience = azureCloud.DetermineMetricsClientBatchQueryAudience(), + Retry = + { + Mode = RetryMode.Exponential, + MaxRetries = 3, + Delay = TimeSpan.FromSeconds(1), + MaxDelay = TimeSpan.FromSeconds(30), + } + }; // retry policy as suggested in the documentation: https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/migrate-to-batch-api?tabs=individual-response#529-throttling-errors + var tokenCredential = AzureAuthenticationFactory.GetTokenCredential(nameof(azureCloud), tenantId, azureAuthenticationInfo, azureCloud.GetAzureAuthorityHost()); + + var azureMonitorLogging = 
azureMonitorLoggingConfiguration.Value; + if (azureMonitorLogging.IsEnabled) + { + using AzureEventSourceListener traceListener = AzureEventSourceListener.CreateTraceLogger(EventLevel.Informational); + metricsClientOptions.Diagnostics.IsLoggingEnabled = true; + } + return new MetricsClient(new Uri(azureCloud.DetermineMetricsClientBatchQueryAudience().ToString()), tokenCredential, metricsClientOptions); + } } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs b/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs index 0a541a60a..1eed77eec 100644 --- a/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs @@ -65,6 +65,12 @@ public LegacyAzureMonitorClient(AzureEnvironment azureCloud, string tenantId, st _authenticatedAzureSubscription = CreateLegacyAzureClient(azureCloud, tenantId, subscriptionId, azureAuthenticationInfo, loggerFactory, metricSinkWriter, azureScrapingSystemMetricsPublisher, azureMonitorLoggingConfiguration); } + public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, + ListresourceIds, string metricFilter = null, int? metricLimit = null) + { + throw new NotSupportedException("Legacy SDK does not support batch queries. 
Consider migrating to the new Azure.Monitor SDK instead"); + } + /// /// Queries Azure Monitor to get the latest value for a specific metric /// diff --git a/src/Promitor.Integrations.AzureMonitor/Promitor.Integrations.AzureMonitor.csproj b/src/Promitor.Integrations.AzureMonitor/Promitor.Integrations.AzureMonitor.csproj index a6327449e..23d58e9d1 100644 --- a/src/Promitor.Integrations.AzureMonitor/Promitor.Integrations.AzureMonitor.csproj +++ b/src/Promitor.Integrations.AzureMonitor/Promitor.Integrations.AzureMonitor.csproj @@ -14,7 +14,7 @@ - + From c9bd29b10e9dc9b2f7ecdddca41e12f86bd07d1e Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 17 Jul 2024 17:16:13 -0700 Subject: [PATCH 011/131] refactor resource query subroutines as extension methods --- .../AzureMonitorQueryClient.cs | 87 +++++++++++-------- .../Extensions/AzureMonitorMetadataTasks.cs | 51 +++++++++++ .../IAzureMonitorClient.cs | 3 + 3 files changed, 106 insertions(+), 35 deletions(-) create mode 100644 src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorMetadataTasks.cs diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index fabb122cc..46ace7b30 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -23,6 +23,7 @@ using Promitor.Core.Extensions; using Azure.Core.Diagnostics; using System.Diagnostics.Tracing; +using Promitor.Integrations.AzureMonitor.Extensions; namespace Promitor.Integrations.AzureMonitor { @@ -83,13 +84,13 @@ public async Task> QueryMetricAsync(string metricName, List // Get all metrics var startQueryingTime = DateTime.UtcNow; - var metricNamespaces = await GetMetricNamespacesAsync(resourceId); + var metricNamespaces = await _metricsQueryClient.GetAndCacheMetricNamespacesAsync(resourceId, _resourceMetricDefinitionMemoryCache, _metricDefinitionCacheDuration); var metricNamespace = 
metricNamespaces.SingleOrDefault(); if (metricNamespace == null) { throw new MetricNotFoundException(metricName); } - var metricsDefinitions = await GetMetricDefinitionsAsync(resourceId, metricNamespace); + var metricsDefinitions = await _metricsQueryClient.GetAndCacheMetricDefinitionsAsync(resourceId, metricNamespace, _resourceMetricDefinitionMemoryCache, _metricDefinitionCacheDuration); var metricDefinition = metricsDefinitions.SingleOrDefault(definition => definition.Name.ToUpper() == metricName.ToUpper()); if (metricDefinition == null) { @@ -135,52 +136,68 @@ public async Task> QueryMetricAsync(string metricName, List return measuredMetrics; } - public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, + public async Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, ListresourceIds, string metricFilter = null, int? 
metricLimit = null) { - return null; - } - - - private async Task> GetMetricDefinitionsAsync(string resourceId, string metricNamespace) - { - // Get cached metric definitions - if (_resourceMetricDefinitionMemoryCache.TryGetValue(resourceId, out IReadOnlyList metricDefinitions)) + Guard.NotNullOrWhitespace(metricName, nameof(metricName)); + Guard.NotLessThan(resourceIds.Count(), 1, nameof(resourceIds)); + + // Get all metrics + var startQueryingTime = DateTime.UtcNow; + var metricNamespaces = await GetMetricNamespacesAsync(resourceIds.First()); + var metricNamespace = metricNamespaces.SingleOrDefault(); + if (metricNamespace == null) { - return metricDefinitions; + throw new MetricNotFoundException(metricName); } - var metricsDefinitions = new List(); - await foreach (var definition in _metricsQueryClient.GetMetricDefinitionsAsync(resourceId, metricNamespace)) + var metricsDefinitions = await GetMetricDefinitionsAsync(resourceIds.First(), metricNamespace); + var metricDefinition = metricsDefinitions.SingleOrDefault(definition => definition.Name.ToUpper() == metricName.ToUpper()); + if (metricDefinition == null) { - metricsDefinitions.Add(definition); + throw new MetricNotFoundException(metricName); } - - // Get from API and cache it - _resourceMetricDefinitionMemoryCache.Set(resourceId, metricsDefinitions, _metricDefinitionCacheDuration); - return metricsDefinitions; - } + var closestAggregationInterval = DetermineAggregationInterval(metricName, aggregationInterval, metricDefinition.MetricAvailabilities); - private async Task> GetMetricNamespacesAsync(string resourceId) - { - // Get cached metric namespaces - var namespaceKey = $"{resourceId}_namespace"; - if (_resourceMetricDefinitionMemoryCache.TryGetValue(namespaceKey, out List metricNamespaces)) + // Get the most recent metric + var metricResult = await GetRelevantMetric(resourceId, metricName, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, 
metricDimensions, metricLimit, startQueryingTime); + + var seriesForMetric = metricResult.TimeSeries; + if (seriesForMetric.Count < 1) { - return metricNamespaces; - } - var foundMetricNamespaces = new List(); - await foreach (var metricNamespace in _metricsQueryClient.GetMetricNamespacesAsync(resourceId)) + throw new MetricInformationNotFoundException(metricName, "No time series was found", metricDimensions); + } + + var measuredMetrics = new List(); + foreach (var timeseries in seriesForMetric) { - foundMetricNamespaces.Add(metricNamespace.FullyQualifiedName); + // Get the most recent value for that metric, that has a finished time series + // We need to shift the time to ensure that the time series is finalized and not report invalid values + var maxTimeSeriesTime = startQueryingTime.AddMinutes(closestAggregationInterval.TotalMinutes); + + var mostRecentMetricValue = GetMostRecentMetricValue(metricName, timeseries, maxTimeSeriesTime); + + // Get the metric value according to the requested aggregation type + var requestedMetricAggregate = InterpretMetricValue(MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), mostRecentMetricValue); + try + { + var measuredMetric = metricDimensions.Count > 0 + ? 
MeasuredMetric.CreateForDimensions(requestedMetricAggregate, metricDimensions, timeseries) + : MeasuredMetric.CreateWithoutDimensions(requestedMetricAggregate); + measuredMetrics.Add(measuredMetric); + } + catch (MissingDimensionException e) + { + _logger.LogWarning("{MetricName} has return a time series with empty value for {Dimension} and the measurements will be dropped", metricName, e.DimensionName); + _logger.LogDebug("The violating time series has content {Details}", JsonConvert.SerializeObject(e.TimeSeries)); + } } - - // Get from API and cache it - _resourceMetricDefinitionMemoryCache.Set(namespaceKey, foundMetricNamespaces, _metricDefinitionCacheDuration); - return foundMetricNamespaces; - } + return measuredMetrics; + } + + private TimeSpan DetermineAggregationInterval(string metricName, TimeSpan requestedAggregationInterval, IReadOnlyList availableMetricPeriods) { // Get perfect match diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorMetadataTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorMetadataTasks.cs new file mode 100644 index 000000000..8687103f5 --- /dev/null +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorMetadataTasks.cs @@ -0,0 +1,51 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Azure.Monitor.Query; +using Azure.Monitor.Query.Models; +using Microsoft.Extensions.Caching.Memory; + +namespace Promitor.Integrations.AzureMonitor.Extensions +{ + public static class AzureMonitorMetadataTasks + { + public static async Task> GetAndCacheMetricDefinitionsAsync(this MetricsQueryClient metricsQueryClient, string resourceId, string metricNamespace, IMemoryCache resourceMetricDefinitionMemoryCache, TimeSpan cacheDuration) + { + // Get cached metric definitions + if (resourceMetricDefinitionMemoryCache.TryGetValue(resourceId, out IReadOnlyList metricDefinitions)) + { + return metricDefinitions; + } + var metricsDefinitions = new List(); + await 
foreach (var definition in metricsQueryClient.GetMetricDefinitionsAsync(resourceId, metricNamespace)) + { + metricsDefinitions.Add(definition); + } + + // Get from API and cache it + resourceMetricDefinitionMemoryCache.Set(resourceId, metricsDefinitions, cacheDuration); + + return metricsDefinitions; + } + + public static async Task> GetAndCacheMetricNamespacesAsync(this MetricsQueryClient metricsQueryClient, string resourceId, IMemoryCache resourceMetricDefinitionMemoryCache, TimeSpan cacheDuration) + { + // Get cached metric namespaces + var namespaceKey = $"{resourceId}_namespace"; + if (resourceMetricDefinitionMemoryCache.TryGetValue(namespaceKey, out List metricNamespaces)) + { + return metricNamespaces; + } + var foundMetricNamespaces = new List(); + await foreach (var metricNamespace in metricsQueryClient.GetMetricNamespacesAsync(resourceId)) + { + foundMetricNamespaces.Add(metricNamespace.FullyQualifiedName); + } + + // Get from API and cache it + resourceMetricDefinitionMemoryCache.Set(namespaceKey, foundMetricNamespaces, cacheDuration); + + return foundMetricNamespaces; + } + } +} \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs b/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs index 743eef465..69b1a7293 100644 --- a/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs @@ -9,5 +9,8 @@ public interface IAzureMonitorClient { public Task> QueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, string resourceId, string metricFilter = null, int? metricLimit = null); + + public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, + ListresourceIds, string metricFilter = null, int? 
metricLimit = null); } } \ No newline at end of file From 43882d60ceb9872267840e6352eaa840397a252f Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 17 Jul 2024 17:42:35 -0700 Subject: [PATCH 012/131] refactor single resource query subroutine as extension --- .../AzureMonitorQueryClient.cs | 79 +---------------- .../Extensions/AzureMonitorQueryTasks.cs | 86 +++++++++++++++++++ 2 files changed, 89 insertions(+), 76 deletions(-) create mode 100644 src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 46ace7b30..99fef42fe 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -100,7 +100,7 @@ public async Task> QueryMetricAsync(string metricName, List var closestAggregationInterval = DetermineAggregationInterval(metricName, aggregationInterval, metricDefinition.MetricAvailabilities); // Get the most recent metric - var metricResult = await GetRelevantMetric(resourceId, metricName, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime); + var metricResult = await _metricsQueryClient.GetRelevantMetricSingleResource(resourceId, metricName, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration); var seriesForMetric = metricResult.TimeSeries; if (seriesForMetric.Count < 1) @@ -144,7 +144,7 @@ public async Task> BatchQueryMetricAsync(string metricName, // Get all metrics var startQueryingTime = DateTime.UtcNow; - var metricNamespaces = await GetMetricNamespacesAsync(resourceIds.First()); + var metricNamespaces = await 
_metricsQueryClient.GetMetricNamespacesAsync(resourceIds.First()); var metricNamespace = metricNamespaces.SingleOrDefault(); if (metricNamespace == null) { @@ -195,9 +195,7 @@ public async Task> BatchQueryMetricAsync(string metricName, return measuredMetrics; } - - - + private TimeSpan DetermineAggregationInterval(string metricName, TimeSpan requestedAggregationInterval, IReadOnlyList availableMetricPeriods) { // Get perfect match @@ -237,48 +235,6 @@ private static TimeSpan GetClosestAggregationInterval(TimeSpan requestedAggregat return closestAggregationInterval; } - private async Task GetRelevantMetric(string resourceId, string metricName, MetricAggregationType metricAggregation, TimeSpan metricInterval, - string metricFilter, List metricDimensions, int? metricLimit, DateTime recordDateTime) - { - MetricsQueryOptions queryOptions; - var querySizeLimit = metricLimit ?? Defaults.MetricDefaults.Limit; - var historyStartingFromInHours = _azureMonitorIntegrationConfiguration.Value.History.StartingFromInHours; - var filter = BuildFilter(metricDimensions, metricFilter); - - if (!string.IsNullOrEmpty(filter)) - { - queryOptions = new MetricsQueryOptions { - Aggregations = { - metricAggregation - }, - Granularity = metricInterval, - Filter = filter, - Size = querySizeLimit, - TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) - }; - } - else - { - queryOptions = new MetricsQueryOptions { - Aggregations= { - metricAggregation - }, - Granularity = metricInterval, - Size = querySizeLimit, - TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) - }; - } - - var metricsQueryResponse = await _metricsQueryClient.QueryResourceAsync(resourceId, [metricName], queryOptions); - var relevantMetric = metricsQueryResponse.Value.Metrics.SingleOrDefault(var => var.Name.ToUpper() == metricName.ToUpper()); - if 
(relevantMetric == null) - { - throw new MetricNotFoundException(metricName); - } - - return relevantMetric; - } - private MetricValue GetMostRecentMetricValue(string metricName, MetricTimeSeriesElement timeSeries, DateTimeOffset recordDateTime) { var relevantMetricValue = timeSeries.Values.Where(metricValue => metricValue.TimeStamp < recordDateTime) @@ -312,35 +268,6 @@ private MetricValue GetMostRecentMetricValue(string metricName, MetricTimeSeries throw new Exception($"Unable to determine the metrics value for aggregator '{metricAggregation}'"); } } - - private static string BuildFilter(List metricDimensions, string metricFilter) - { - var filterDictionary = new Dictionary(); - metricDimensions.ForEach(metricDimension => filterDictionary.Add(metricDimension, "'*'")); - - if (string.IsNullOrWhiteSpace(metricFilter) == false) { - var filterConditions = metricFilter.Split(" and ").ToList(); - foreach (string condition in filterConditions) - { - string[] parts = condition.Split(" eq ", StringSplitOptions.None); - if (filterDictionary.ContainsKey(parts[0])) - { - filterDictionary[parts[0]] = parts[1]; - } - else - { - filterDictionary.Add(parts[0].Trim(), parts[1]); - } - } - } - - if (filterDictionary.Count > 0) - { - return string.Join(" and ", filterDictionary.Select(kvp => $"{kvp.Key} eq {kvp.Value}")); - } - return null; - } - /// /// Creates authenticated client to query for metrics /// diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs new file mode 100644 index 000000000..a1e01d47e --- /dev/null +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -0,0 +1,86 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Azure.Monitor.Query; +using Azure.Monitor.Query.Models; +using Microsoft.Extensions.Options; +using Promitor.Core; +using 
Promitor.Integrations.AzureMonitor.Configuration; +using Promitor.Integrations.AzureMonitor.Exceptions; + +namespace Promitor.Integrations.AzureMonitor.Extensions +{ + public static class AzureMonitorQueryTasks + { + public static async Task GetRelevantMetricSingleResource(this MetricsQueryClient metricsQueryClient, string resourceId, string metricName, MetricAggregationType metricAggregation, TimeSpan metricInterval, + string metricFilter, List metricDimensions, int? metricLimit, DateTime recordDateTime, IOptions azureMonitorIntegrationConfiguration) + { + MetricsQueryOptions queryOptions; + var querySizeLimit = metricLimit ?? Defaults.MetricDefaults.Limit; + var historyStartingFromInHours = azureMonitorIntegrationConfiguration.Value.History.StartingFromInHours; + var filter = BuildFilter(metricDimensions, metricFilter); + + if (!string.IsNullOrEmpty(filter)) + { + queryOptions = new MetricsQueryOptions { + Aggregations = { + metricAggregation + }, + Granularity = metricInterval, + Filter = filter, + Size = querySizeLimit, + TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) + }; + } + else + { + queryOptions = new MetricsQueryOptions { + Aggregations= { + metricAggregation + }, + Granularity = metricInterval, + Size = querySizeLimit, + TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) + }; + } + + var metricsQueryResponse = await metricsQueryClient.QueryResourceAsync(resourceId, [metricName], queryOptions); + var relevantMetric = metricsQueryResponse.Value.Metrics.SingleOrDefault(var => var.Name.ToUpper() == metricName.ToUpper()); + if (relevantMetric == null) + { + throw new MetricNotFoundException(metricName); + } + + return relevantMetric; + } + + private static string BuildFilter(List metricDimensions, string metricFilter) + { + var filterDictionary = new Dictionary(); + 
metricDimensions.ForEach(metricDimension => filterDictionary.Add(metricDimension, "'*'")); + + if (string.IsNullOrWhiteSpace(metricFilter) == false) { + var filterConditions = metricFilter.Split(" and ").ToList(); + foreach (string condition in filterConditions) + { + string[] parts = condition.Split(" eq ", StringSplitOptions.None); + if (filterDictionary.ContainsKey(parts[0])) + { + filterDictionary[parts[0]] = parts[1]; + } + else + { + filterDictionary.Add(parts[0].Trim(), parts[1]); + } + } + } + + if (filterDictionary.Count > 0) + { + return string.Join(" and ", filterDictionary.Select(kvp => $"{kvp.Key} eq {kvp.Value}")); + } + return null; + } + } +} \ No newline at end of file From ff143679fefa334abe9d26629ae8ca9c910e94fc Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 19 Jul 2024 16:06:43 -0700 Subject: [PATCH 013/131] code batch query execution and result processing --- .../AzureMonitorQueryClient.cs | 58 ++++++------------- .../Extensions/AzureMonitorQueryTasks.cs | 52 +++++++++++++++-- 2 files changed, 66 insertions(+), 44 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 99fef42fe..1ce439d39 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -6,7 +6,6 @@ using Microsoft.Extensions.Caching.Memory; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; -using Promitor.Core; using Promitor.Core.Metrics; using Promitor.Core.Metrics.Interfaces; using Promitor.Core.Metrics.Sinks; @@ -102,38 +101,7 @@ public async Task> QueryMetricAsync(string metricName, List // Get the most recent metric var metricResult = await _metricsQueryClient.GetRelevantMetricSingleResource(resourceId, metricName, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, 
metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration); - var seriesForMetric = metricResult.TimeSeries; - if (seriesForMetric.Count < 1) - { - throw new MetricInformationNotFoundException(metricName, "No time series was found", metricDimensions); - } - - var measuredMetrics = new List(); - foreach (var timeseries in seriesForMetric) - { - // Get the most recent value for that metric, that has a finished time series - // We need to shift the time to ensure that the time series is finalized and not report invalid values - var maxTimeSeriesTime = startQueryingTime.AddMinutes(closestAggregationInterval.TotalMinutes); - - var mostRecentMetricValue = GetMostRecentMetricValue(metricName, timeseries, maxTimeSeriesTime); - - // Get the metric value according to the requested aggregation type - var requestedMetricAggregate = InterpretMetricValue(MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), mostRecentMetricValue); - try - { - var measuredMetric = metricDimensions.Count > 0 - ? 
MeasuredMetric.CreateForDimensions(requestedMetricAggregate, metricDimensions, timeseries) - : MeasuredMetric.CreateWithoutDimensions(requestedMetricAggregate); - measuredMetrics.Add(measuredMetric); - } - catch (MissingDimensionException e) - { - _logger.LogWarning("{MetricName} has return a time series with empty value for {Dimension} and the measurements will be dropped", metricName, e.DimensionName); - _logger.LogDebug("The violating time series has content {Details}", JsonConvert.SerializeObject(e.TimeSeries)); - } - } - - return measuredMetrics; + return ProcessMetricResult(metricResult, metricName, startQueryingTime, closestAggregationInterval, aggregationType, metricDimensions); } public async Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, @@ -142,15 +110,15 @@ public async Task> BatchQueryMetricAsync(string metricName, Guard.NotNullOrWhitespace(metricName, nameof(metricName)); Guard.NotLessThan(resourceIds.Count(), 1, nameof(resourceIds)); - // Get all metrics + // Get all metrics var startQueryingTime = DateTime.UtcNow; - var metricNamespaces = await _metricsQueryClient.GetMetricNamespacesAsync(resourceIds.First()); + var metricNamespaces = await _metricsQueryClient.GetAndCacheMetricNamespacesAsync(resourceIds.First(), _resourceMetricDefinitionMemoryCache, _metricDefinitionCacheDuration); var metricNamespace = metricNamespaces.SingleOrDefault(); if (metricNamespace == null) { throw new MetricNotFoundException(metricName); } - var metricsDefinitions = await GetMetricDefinitionsAsync(resourceIds.First(), metricNamespace); + var metricsDefinitions = await _metricsQueryClient.GetAndCacheMetricDefinitionsAsync(resourceIds.First(), metricNamespace, _resourceMetricDefinitionMemoryCache, _metricDefinitionCacheDuration); var metricDefinition = metricsDefinitions.SingleOrDefault(definition => definition.Name.ToUpper() == metricName.ToUpper()); if (metricDefinition == 
null) { @@ -160,8 +128,20 @@ public async Task> BatchQueryMetricAsync(string metricName, var closestAggregationInterval = DetermineAggregationInterval(metricName, aggregationInterval, metricDefinition.MetricAvailabilities); // Get the most recent metric - var metricResult = await GetRelevantMetric(resourceId, metricName, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime); + var metricResultsList = await _metricsBatchQueryClient.GetRelevantMetricForResourçes(resourceIds, metricName, metricNamespace, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration); + //TODO: This is potentially a lot of results to process in a single thread. Think of ways to utilize additional parallelism + return metricResultsList + .Select(metricResult => ProcessMetricResult(metricResult, metricName, startQueryingTime, closestAggregationInterval, aggregationType, metricDimensions)) + .SelectMany(measureMetricsList => measureMetricsList) + .ToList(); + } + + /// + /// Process metrics query response as time series values using the Promitor data model(MeasuredMetric) + /// + private List ProcessMetricResult(MetricResult metricResult, string metricName, DateTime startQueryingTime, TimeSpan closestAggregationInterval, PromitorMetricAggregationType aggregationType, List metricDimensions) + { var seriesForMetric = metricResult.TimeSeries; if (seriesForMetric.Count < 1) { @@ -290,8 +270,8 @@ private MetricsQueryClient CreateAzureMonitorMetricsClient(AzureCloud azureCloud return new MetricsQueryClient(tokenCredential, metricsQueryClientOptions); } - /// - /// Creates authenticated client to query for metrics + /// + /// Creates authenticated client for metrics batch queries /// private MetricsClient 
CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud, string tenantId, AzureAuthenticationInfo azureAuthenticationInfo, IOptions azureMonitorLoggingConfiguration) { var metricsClientOptions = new MetricsClientOptions{ diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index a1e01d47e..efc7bb774 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; +using Azure.Core; using Azure.Monitor.Query; using Azure.Monitor.Query.Models; using Microsoft.Extensions.Options; @@ -46,13 +47,43 @@ public static async Task GetRelevantMetricSingleResource(this Metr } var metricsQueryResponse = await metricsQueryClient.QueryResourceAsync(resourceId, [metricName], queryOptions); - var relevantMetric = metricsQueryResponse.Value.Metrics.SingleOrDefault(var => var.Name.ToUpper() == metricName.ToUpper()); - if (relevantMetric == null) + return GetRelevantMetricResultOrThrow(metricsQueryResponse.Value, metricName); + } + + public static async Task> GetRelevantMetricForResourçes(this MetricsClient metricsClient, List resourceIds, string metricName, string metricNamespace, MetricAggregationType metricAggregation, TimeSpan metricInterval, + string metricFilter, List metricDimensions, int? metricLimit, DateTime recordDateTime, IOptions azureMonitorIntegrationConfiguration) + { + MetricsQueryResourcesOptions queryOptions; + var querySizeLimit = metricLimit ?? 
Defaults.MetricDefaults.Limit; + var historyStartingFromInHours = azureMonitorIntegrationConfiguration.Value.History.StartingFromInHours; + var filter = BuildFilter(metricDimensions, metricFilter); + List resourceIdentifiers = resourceIds.Select(id => new ResourceIdentifier(id)).ToList(); + + if (!string.IsNullOrEmpty(filter)) { - throw new MetricNotFoundException(metricName); + queryOptions = new MetricsQueryResourcesOptions { + Aggregations = { metricAggregation.ToString() }, + Granularity = metricInterval, + Filter = filter, + Size = querySizeLimit, + TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) + }; + } + else + { + queryOptions = new MetricsQueryResourcesOptions { + Aggregations = { metricAggregation.ToString() }, + Granularity = metricInterval, + Size = querySizeLimit, + TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) + }; } - - return relevantMetric; + + var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); + var metricsQueryResults = metricsBatchQueryResponse.Value; + return metricsQueryResults.Values + .Select(result => GetRelevantMetricResultOrThrow(result, metricName)) + .ToList(); } private static string BuildFilter(List metricDimensions, string metricFilter) @@ -82,5 +113,16 @@ private static string BuildFilter(List metricDimensions, string metricFi } return null; } + + private static MetricResult GetRelevantMetricResultOrThrow(MetricsQueryResult metricsQueryResult, string metricName) + { + var relevantMetric = metricsQueryResult.Metrics.SingleOrDefault(var => var.Name.ToUpper() == metricName.ToUpper()); + if (relevantMetric == null) + { + throw new MetricNotFoundException(metricName); + } + + return relevantMetric; + } } } \ No newline at end of file From 
3e3ef54ad6c66cb7bdfc67e2fe5a9361bb1abb3a Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 29 Jul 2024 23:06:26 -0700 Subject: [PATCH 014/131] make some compile errors go away --- .../Scheduling/ResourcesScrapingJob.cs | 27 ++-- .../Model/Metrics/BatchScrapeDefinition.cs | 8 +- .../Model/Metrics/ScrapeDefinition.cs | 5 +- .../Model/Metrics/ScrapeDefinitionBatch.cs | 119 ++++++++++++------ 4 files changed, 99 insertions(+), 60 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index e6b332d79..9db0177bd 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -261,8 +261,8 @@ private async Task ScrapeMetrics(IEnumerable ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } @@ -288,12 +288,13 @@ private async Task ScrapeMetricBatched(BatchScrapeDefinition scrap private List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) { - Dictionary>> groupedScrapeDefinitions = allScrapeDefinitions.GroupBy(def => def.buildPropertiesForBatch()) - .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max - .ToDictionary(group => group.Key, SplitScrapeDefinitionBatch(group.Value)) // split to right-sized batches - .SelectMany(group => group.Value.SelectMany(batch => new BatchScrapeDefinition(batch, group.Key))); // flatten - return groupedScrapeDefinitions; + return allScrapeDefinitions.GroupBy(def => def.buildPropertiesForBatch()) + .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max + .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches + .SelectMany(group => group.Value.Select(batch => new 
BatchScrapeDefinition(group.Key, batch))) + .ToList(); // flatten } /// /// splits the "raw" batch according to max batch size configured /// - private List>> SplitScrapeDefinitionBatch(List batchToSplit, int maxBatchSize, CancellationToken cancellationToken) + private List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize, CancellationToken cancellationToken) { int numNewGroups = (batchToSplit.Count - 1) / 50 + 1; - return Enumerable.Range(0, batchToSplit) + return Enumerable.Range(0, numNewGroups) .Select(i => batchToSplit.Skip(i * 50).Take(50).ToList()) .ToList(); } diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs index 753dcea5e..b49e6a34e 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -25,13 +25,13 @@ public class BatchScrapeDefinition where TResourceDefinitio /// The name of the resource group containing the resource to scrape. This should contain the global /// resource group name if none is overridden at the resource level. 
/// - public BatchScrapeDefinition(List> groupedScrapeDefinitions, ScrapeDefinitionBatchProperties scrapeDefinitionBatchProperties) + public BatchScrapeDefinition(ScrapeDefinitionBatchProperties scrapeDefinitionBatchProperties, List> groupedScrapeDefinitions) { - Guard.NotNull(groupedScrapeDefinitions, nameof(groupedScrapeDefinitions)); Guard.NotNull(groupedScrapeDefinitions, nameof(scrapeDefinitionBatchProperties)); - - ScrapeDefinitions = groupedScrapeDefinitions; + Guard.NotNull(groupedScrapeDefinitions, nameof(groupedScrapeDefinitions)); + ScrapeDefinitionBatchProperties = scrapeDefinitionBatchProperties; + ScrapeDefinitions = groupedScrapeDefinitions; } /// diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs index 21c3552af..2e4ddb1c1 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs @@ -98,9 +98,10 @@ public ScrapeDefinition( public ScrapeDefinitionBatchProperties buildPropertiesForBatch() { return new ScrapeDefinitionBatchProperties( this.AzureMetricConfiguration, - this.SubscriptionId, + this.PrometheusMetricDefinition, this.Resource.ResourceType, - this.GetAggregationInterval + this.Scraping, + this.SubscriptionId ); } } diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index c924ca675..c8ab95a03 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using GuardNet; using Promitor.Core.Contracts; namespace Promitor.Core.Scraping.Configuration.Model.Metrics @@ -9,57 +10,93 @@ namespace 
Promitor.Core.Scraping.Configuration.Model.Metrics /// public class ScrapeDefinitionBatchProperties : IEquatable { + /// Configuration about the Azure Monitor metric to scrape + /// The details of the prometheus metric that will be created. + /// The scraping model. + /// The resource to scrape. + /// Specify a subscription to scrape that defers from the default subscription. + /// + /// The name of the resource group containing the resource to scrape. This should contain the global + /// resource group name if none is overridden at the resource level. + /// + public ScrapeDefinitionBatchProperties( + AzureMetricConfiguration azureMetricConfiguration, + PrometheusMetricDefinition prometheusMetricDefinition, + ResourceType resourceType, + Scraping scraping, + string subscriptionId) + { + Guard.NotNull(azureMetricConfiguration, nameof(azureMetricConfiguration)); + Guard.NotNull(prometheusMetricDefinition, nameof(prometheusMetricDefinition)); + Guard.NotNull(scraping, nameof(scraping)); + Guard.NotNull(subscriptionId, nameof(subscriptionId)); - } + AzureMetricConfiguration = azureMetricConfiguration; + PrometheusMetricDefinition = prometheusMetricDefinition; + Scraping = scraping; + SubscriptionId = subscriptionId; + ResourceType = resourceType; + } - /// - /// Configuration about the Azure Monitor metric to scrape - /// - public AzureMetricConfiguration AzureMetricConfiguration { get; } + /// + /// Configuration about the Azure Monitor metric to scrape + /// + public AzureMetricConfiguration AzureMetricConfiguration { get; } + /// + /// The details of the prometheus metric that will be created. + /// + public PrometheusMetricDefinition PrometheusMetricDefinition { get; } - /// - /// The scraping model. - /// - public Scraping Scraping { get; } + /// + /// The scraping model. + /// + public Scraping Scraping { get; } - /// - /// The Azure subscription to get the metric from. 
This should be used instead of using - /// the SubscriptionId from because this property will contain - /// the global subscription id if none is overridden at the resource level. - /// - public string SubscriptionId { get; } + /// + /// The Azure subscription to get the metric from. This should be used instead of using + /// the SubscriptionId from because this property will contain + /// the global subscription id if none is overridden at the resource level. + /// + public string SubscriptionId { get; } - /// - /// The Azure resource type shared by all scrape definitions in the batch - /// - public ResourceType ResourceType { get; } + /// + /// The Azure resource type shared by all scrape definitions in the batch + /// + public ResourceType ResourceType { get; } - public TimeSpan AggregationInterval{ get; } + public TimeSpan AggregationInterval{ get; } - public override int GetHashCode() - { - return this.BuildBatchHashKey().GetHashCode(); - } + public TimeSpan? GetAggregationInterval() + { + return AzureMetricConfiguration?.Aggregation?.Interval; + } - /// - /// Builds a namespaced string key to satisfy batch restrictions, in the format of - /// ___ - /// - private string BuildBatchHashKey() - { - return string.Join("_", [List.ofAzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString]); - } + public override int GetHashCode() + { + return this.BuildBatchHashKey().GetHashCode(); + } - /// - /// Equality comparison override in case of hash collision - /// - public override bool Equals(object obj) - { - if (obj == null || !(obj is ScrapeDefinitionBatchProperties)) - return false; + /// + /// Builds a namespaced string key to satisfy batch restrictions, in the format of + /// ___ + /// + private string BuildBatchHashKey() + { + return string.Join("_", [List.ofAzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString]); + } + + 
/// + /// Equality comparison override in case of hash collision + /// + public bool Equals(ScrapeDefinitionBatchProperties obj) + { + if (obj == null || !(obj is ScrapeDefinitionBatchProperties)) + return false; + + ScrapeDefinitionBatchProperties other = (ScrapeDefinitionBatchProperties)obj; + return this.ResourceType == other.ResourceType && this.AzureMetricConfiguration.ToUniqueStringRepresentation() == other.ToUniqueStringRepresentation() && this.SubscriptionId == other.SubscriptionId && this.AggregationInterval.Equals(other.AggregationDeserializer); + } - MyClass other = (ScrapeDefinitionBatchProperties)obj; - return this.ResourceType == other.ResourceType && this.AzureMetricConfiguration.ToUniqueStringRepresentation() == other.ToUniqueStringRepresentation() && this.SubscriptionId == other.SubscriptionId && this.AggregationInterval.Equals(other.AggregationDeserializer); } } \ No newline at end of file From e290f9041c2fd8c678c39b5a9831f02d5ea2d7ab Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 29 Jul 2024 23:07:10 -0700 Subject: [PATCH 015/131] make some compile errors go away --- src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 9db0177bd..e628bd0f8 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -301,7 +301,7 @@ private async Task ScrapeMetricBatched(BatchScrapeDefinition Date: Wed, 31 Jul 2024 16:15:44 -0700 Subject: [PATCH 016/131] record batch size as histogram metric --- src/Promitor.Core.Scraping/Scraper.cs | 16 ++++++++++++++-- .../AggregatedSystemMetricsPublisher.cs | 18 ++++++++++++++++++ .../IAzureScrapingSystemMetricsPublisher.cs | 9 +++++++++ .../Interfaces/ISystemMetricsPublisher.cs | 10 ++++++++++ 
.../Metrics/Interfaces/ISystemMetricsSink.cs | 10 ++++++++++ src/Promitor.Core/RuntimeMetricNames.cs | 1 + .../AzureScrapingSystemMetricsPublisher.cs | 9 +++++++++ .../Collectors/PrometheusSystemMetricsSink.cs | 18 ++++++++++++++++++ 8 files changed, 89 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index 5833aa266..2181a9c93 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -156,8 +156,9 @@ public async Task BatchScrapeAsync(BatchScrapeDefinition scrapeDefinition, bool isSuccessful, bool isBatchJob = false) + private async Task ReportScrapingOutcomeAsync(ScrapeDefinition scrapeDefinition, bool isSuccessful, int batchSize = 0) { // We reset all values, by default double successfulMetricValue = 0; @@ -181,12 +182,23 @@ private async Task ReportScrapingOutcomeAsync(ScrapeDefinition 0) + { + await AzureScrapingSystemMetricsPublisher.WriteHistogramMeasurementAsync(RuntimeMetricNames.BatchSize, BatvhSizeMetricDescription, batchSize, labels); + labels.Add("is_batch", "1"); + } else { + labels.Add("is_batch", "0"); + } // Report! await AzureScrapingSystemMetricsPublisher.WriteGaugeMeasurementAsync(RuntimeMetricNames.ScrapeSuccessful, ScrapeSuccessfulMetricDescription, successfulMetricValue, labels); await AzureScrapingSystemMetricsPublisher.WriteGaugeMeasurementAsync(RuntimeMetricNames.ScrapeError, ScrapeErrorMetricDescription, unsuccessfulMetricValue, labels); + + if (batchSize > 0) + { + await AzureScrapingSystemMetricsPublisher.WriteHistogramMeasurementAsync(RuntimeMetricNames.BatchSize, BatvhSizeMetricDescription, batchSize, labels); + } } private void LogMeasuredMetrics(ScrapeDefinition scrapeDefinition, ScrapeResult scrapedMetricResult, TimeSpan? 
aggregationInterval) diff --git a/src/Promitor.Core/Metrics/AggregatedSystemMetricsPublisher.cs b/src/Promitor.Core/Metrics/AggregatedSystemMetricsPublisher.cs index 597ec1ed9..735a39be2 100644 --- a/src/Promitor.Core/Metrics/AggregatedSystemMetricsPublisher.cs +++ b/src/Promitor.Core/Metrics/AggregatedSystemMetricsPublisher.cs @@ -30,5 +30,23 @@ public async Task WriteGaugeMeasurementAsync (string name, string description, d await metricCollector.WriteGaugeMeasurementAsync(name, description, value, labels, includeTimestamp); } } + + public async Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) + { + if (_metricSinks == null) + { + return; + } + + foreach (var metricCollector in _metricSinks) + { + if (metricCollector == null) + { + continue; + } + + await metricCollector.WriteHistogramMeasurementAsync(name, description, value, labels, includeTimestamp); + } + } } } diff --git a/src/Promitor.Core/Metrics/Interfaces/IAzureScrapingSystemMetricsPublisher.cs b/src/Promitor.Core/Metrics/Interfaces/IAzureScrapingSystemMetricsPublisher.cs index e723c765f..42110cc2c 100644 --- a/src/Promitor.Core/Metrics/Interfaces/IAzureScrapingSystemMetricsPublisher.cs +++ b/src/Promitor.Core/Metrics/Interfaces/IAzureScrapingSystemMetricsPublisher.cs @@ -13,5 +13,14 @@ public interface IAzureScrapingSystemMetricsPublisher : ISystemMetricsPublisher /// New measured value /// Labels that are applicable for this measurement Task WriteGaugeMeasurementAsync(string name, string description, double value, Dictionary labels); + + /// + /// Records a histogram value + /// + /// Name of the metric + /// Description of the metric + /// New measured value + /// Labels that are applicable for this measurement + Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels); } } \ No newline at end of file diff --git a/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsPublisher.cs 
b/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsPublisher.cs index d4e5a07b2..55228c16d 100644 --- a/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsPublisher.cs +++ b/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsPublisher.cs @@ -14,5 +14,15 @@ public interface ISystemMetricsPublisher /// Labels that are applicable for this measurement /// Indication whether or not a timestamp should be reported Task WriteGaugeMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp); + + /// + /// Records a histogram measurement + /// + /// Name of the metric + /// Description of the metric + /// New measured value + /// Labels that are applicable for this measurement + /// Indication whether or not a timestamp should be reported + Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp); } } diff --git a/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsSink.cs b/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsSink.cs index a32bf6b43..2ec45850f 100644 --- a/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsSink.cs +++ b/src/Promitor.Core/Metrics/Interfaces/ISystemMetricsSink.cs @@ -14,5 +14,15 @@ public interface ISystemMetricsSink /// Labels that are applicable for this measurement /// Indication whether or not a timestamp should be reported Task WriteGaugeMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp); + + /// + /// Records a histogram measurement + /// + /// Name of the metric + /// Description of the metric + /// New measured value + /// Labels that are applicable for this measurement + /// Indication whether or not a timestamp should be reported + Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp); } } diff --git a/src/Promitor.Core/RuntimeMetricNames.cs b/src/Promitor.Core/RuntimeMetricNames.cs index 
1d7b44e5b..75bebddfd 100644 --- a/src/Promitor.Core/RuntimeMetricNames.cs +++ b/src/Promitor.Core/RuntimeMetricNames.cs @@ -8,5 +8,6 @@ public static class RuntimeMetricNames public static string ResourceGraphThrottled => "promitor_ratelimit_resource_graph_throttled"; public static string ScrapeSuccessful => "promitor_scrape_success"; public static string ScrapeError => "promitor_scrape_error"; + public static string BatchSize => "promitor_batch_size"; } } \ No newline at end of file diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs index c5e46b75c..0fbe430ce 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs @@ -48,5 +48,14 @@ public async Task WriteGaugeMeasurementAsync(string name, string description, do { await _systemMetricsPublisher.WriteGaugeMeasurementAsync(name, description, value, labels, includeTimestamp); } + + public async Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels) + { + throw new System.NotImplementedException(); + } + public async Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) + { + throw new System.NotImplementedException(); + } } } diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs index c307fd074..ce47c5216 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs @@ -36,5 +36,23 @@ public Task WriteGaugeMeasurementAsync(string name, string description, 
double v return Task.CompletedTask; } + + /// + /// Records measurement for a histogram instrument + /// + /// Name of the metric + /// Description of the metric + /// New measured value + /// Labels that are applicable for this measurement + /// Indication whether or not a timestamp should be reported + public Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) + { + var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); + + // TODO: are histogram instruments created on every invocation? Would that interfere with correctness? + var histogram = _metricFactory.CreateHistogram(name, help: description, includeTimestamp: includeTimestamp, labelNames: orderedLabels.Keys.ToArray()); + histogram.WithLabels(orderedLabels.Values.ToArray()).Observe(value); + return Task.CompletedTask; + } } } From f35fce4b2c534fdfce724d6b3f2206789cede41f Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 2 Aug 2024 13:16:45 -0700 Subject: [PATCH 017/131] restrict batch scraping to only Azure Monitor scraper for now + define MeasuredMetric wit resource ID --- .../AzureMonitorScraper.cs | 46 +++++++++++++++++++ .../LogAnalyticsScraper.cs | 5 ++ .../ResourceTypes/StorageQueueScraper.cs | 5 ++ src/Promitor.Core.Scraping/Scraper.cs | 10 ++-- .../Extensions/MeasureMetricExtensions.cs | 19 ++++++++ src/Promitor.Core/Metrics/MeasuredMetric.cs | 4 +- .../ResourceAssociatedMeasuredMetric.cs | 33 +++++++++++++ .../AzureMonitorQueryClient.cs | 1 + 8 files changed, 116 insertions(+), 7 deletions(-) create mode 100644 src/Promitor.Core/Extensions/MeasureMetricExtensions.cs create mode 100644 src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 81b143e9a..5e4a14253 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ 
b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -73,6 +73,52 @@ protected override async Task ScrapeResourceAsync(string subscript return new ScrapeResult(subscriptionId, scrapeDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceUri, finalMetricValues, metricLabels); } + protected override async Task> BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) + { + Guard.NotNull(batchScrapeDefinition, nameof(batchScrapeDefinition)); + Guard.NotLessThan(batchScrapeDefinition.ScrapeDefinitions.Count(), 1, nameof(batchScrapeDefinition)); + Guard.NotNull(batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration, nameof(batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration)); + + var metricName = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.MetricName; + + // Build list of resource URIs based on definitions in the batch + var resourceUriList = new List(); + foreach (ScrapeDefinition scrapeDefinition in batchScrapeDefinition.ScrapeDefinitions) + { + var resourceUri = BuildResourceUri(subscriptionId, scrapeDefinition, (TResourceDefinition) scrapeDefinition.Resource); + resourceUriList.Add(resourceUri); + } + + var metricFilter = DetermineMetricFilter(metricName, (TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); + var metricLimit = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.Limit; + var dimensionNames = DetermineMetricDimensions(metricName, (TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration); // TODO: resource definition doesn't seem to be used, can we remove it from function signature? 
+ + var measuredMetrics = new List(); + try + { + // Query Azure Monitor for metrics + measuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, metricFilter, metricLimit); + } + catch (MetricInformationNotFoundException metricsNotFoundException) + { + Logger.LogWarning("No metric information found for metric {MetricName} with dimensions {MetricDimensions}. Details: {Details}", metricsNotFoundException.Name, metricsNotFoundException.Dimensions, metricsNotFoundException.Details); + + var measuredMetric = dimensionNames.Any() + ? MeasuredMetric.CreateForDimensions(dimensionNames) + : MeasuredMetric.CreateWithoutDimensions(null); + measuredMetrics.Add(measuredMetric); + } + + // Provide more metric labels, if we need to + var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); + + // Enrich measured metrics, in case we need to + var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, measuredMetrics); + + // We're done! + return new ScrapeResult(subscriptionId, batchScrapeDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceUri, finalMetricValues, metricLabels); + } + private int? 
DetermineMetricLimit(ScrapeDefinition scrapeDefinition) { return scrapeDefinition.AzureMetricConfiguration.Limit; diff --git a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs index eadfd90a7..bee859da7 100644 --- a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs +++ b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs @@ -49,5 +49,10 @@ private Dictionary DetermineMetricLabels(LogAnalyticsResourceDef { return new Dictionary { { "workspace_id", resourceDefinition.WorkspaceId }, {"workspace_name", resourceDefinition.WorkspaceName} }; } + + protected override Task BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) + { + throw new NotImplementedException("Batch scraping not yet implemented for log analytics"); + } } } \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/ResourceTypes/StorageQueueScraper.cs b/src/Promitor.Core.Scraping/ResourceTypes/StorageQueueScraper.cs index 9a0d4c32b..62e33eb69 100644 --- a/src/Promitor.Core.Scraping/ResourceTypes/StorageQueueScraper.cs +++ b/src/Promitor.Core.Scraping/ResourceTypes/StorageQueueScraper.cs @@ -60,5 +60,10 @@ protected override string BuildResourceUri(string subscriptionId, ScrapeDefiniti { return string.Format(ResourceUriTemplate, subscriptionId, scrapeDefinition.ResourceGroupName, resource.AccountName); } + + protected override Task> BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) + { + throw new NotImplementedException("Batch scaping is not possible for storage queue resources"); + } } } \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index 2181a9c93..47a026e99 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ 
b/src/Promitor.Core.Scraping/Scraper.cs @@ -115,7 +115,7 @@ public async Task BatchScrapeAsync(BatchScrapeDefinition ScrapeResourceAsync( /// Contains the resource cast to the specific resource type. /// Aggregation for the metric to use /// Interval that is used to aggregate metrics - protected abstract Task> BatchScrapeResourceAsync( + protected abstract Task BatchScrapeResourceAsync( string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, - AggregationType aggregationType, + PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval); /// diff --git a/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs b/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs new file mode 100644 index 000000000..8edfab2a5 --- /dev/null +++ b/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs @@ -0,0 +1,19 @@ +using Promitor.Core.Metrics; + +namespace Promitor.Core.Extensions +{ + public static class MeasuredMetricExtensions + { + /// + /// Get Azure environment information under legacy SDK model + /// + /// Microsoft Azure cloud + /// Azure environment information for specified cloud + public static ResourceAssociatedMeasuredMetric WithResourceIdAssociation(this MeasuredMetric measuredMetric, string resourceId) + { + return measuredMetric.IsDimensional + ? new ResourceAssociatedMeasuredMetric(measuredMetric.Value, measuredMetric.Dimensions, resourceId) + : new ResourceAssociatedMeasuredMetric(measuredMetric.Value, resourceId); + } + } +} \ No newline at end of file diff --git a/src/Promitor.Core/Metrics/MeasuredMetric.cs b/src/Promitor.Core/Metrics/MeasuredMetric.cs index 4e4c6d675..6563b138c 100644 --- a/src/Promitor.Core/Metrics/MeasuredMetric.cs +++ b/src/Promitor.Core/Metrics/MeasuredMetric.cs @@ -25,12 +25,12 @@ public class MeasuredMetric /// public bool IsDimensional { get; } - private MeasuredMetric(double? value) + protected MeasuredMetric(double? value) { Value = value; } - private MeasuredMetric(double? 
value, List dimensions) + protected MeasuredMetric(double? value, List dimensions) { Guard.NotAny(dimensions, nameof(dimensions)); diff --git a/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs b/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs new file mode 100644 index 000000000..749cab87b --- /dev/null +++ b/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs @@ -0,0 +1,33 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using Azure.Monitor.Query.Models; +using GuardNet; +using Microsoft.Azure.Management.Monitor.Fluent.Models; +using Microsoft.Azure.Management.ResourceManager.Fluent.Core; +using Promitor.Core.Metrics.Exceptions; + +namespace Promitor.Core.Metrics +{ + /// + /// A subclass of MeasuredMetric model to be used in batch query settings, where metrics across many resources are mixed together in the response. + /// The ResourceId attribute allows grouping/tagging by resource IDs during processing + /// + public class ResourceAssociatedMeasuredMetric : MeasuredMetric + { + /// + /// resourceId associated with this metric + /// + public string ResourceId { get; } + + public ResourceAssociatedMeasuredMetric(double? value, string resourceId) : base(value) + { + ResourceId = resourceId; + } + + public ResourceAssociatedMeasuredMetric(double? 
value, List dimensions, string resourceId) : base(value, dimensions) + { + ResourceId = resourceId; + } + } +} \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 1ce439d39..634e76f27 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -142,6 +142,7 @@ public async Task> BatchQueryMetricAsync(string metricName, /// private List ProcessMetricResult(MetricResult metricResult, string metricName, DateTime startQueryingTime, TimeSpan closestAggregationInterval, PromitorMetricAggregationType aggregationType, List metricDimensions) { + metricResult.Id var seriesForMetric = metricResult.TimeSeries; if (seriesForMetric.Count < 1) { From e8cf5e11e46de4f7ec785a17031dc1d06cee844e Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 2 Aug 2024 16:42:32 -0700 Subject: [PATCH 018/131] return resource ID tagged measure metrics in scrape path --- .../Scheduling/ResourcesScrapingJob.cs | 3 --- src/Promitor.Core.Scraping/Scraper.cs | 2 +- .../ResourceAssociatedMeasuredMetric.cs | 7 ----- .../AzureMonitorQueryClient.cs | 7 +++-- .../Extensions/MetricResultExtension.cs | 26 +++++++++++++++++++ .../IAzureMonitorClient.cs | 2 +- .../LegacyAzureMonitorClient.cs | 2 +- 7 files changed, 32 insertions(+), 17 deletions(-) create mode 100644 src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index e628bd0f8..dff22afe7 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -11,13 +11,10 @@ using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; using Promitor.Agents.Scraper.Discovery.Interfaces; -using 
Promitor.Agents.Scraper.Validation.MetricDefinitions.ResourceTypes; -using Promitor.Core; using Promitor.Core.Contracts; using Promitor.Core.Extensions; using Promitor.Core.Metrics.Interfaces; using Promitor.Core.Metrics.Sinks; -using Promitor.Core.Scraping; using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Scraping.Configuration.Model.Metrics; using Promitor.Core.Scraping.Factories; diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index 47a026e99..762dcd8ff 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -290,7 +290,7 @@ protected abstract Task ScrapeResourceAsync( /// Contains the resource cast to the specific resource type. /// Aggregation for the metric to use /// Interval that is used to aggregate metrics - protected abstract Task BatchScrapeResourceAsync( + protected abstract Task> BatchScrapeResourceAsync( string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, diff --git a/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs b/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs index 749cab87b..57fc5f0c9 100644 --- a/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs +++ b/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs @@ -1,11 +1,4 @@ -using System; using System.Collections.Generic; -using System.Linq; -using Azure.Monitor.Query.Models; -using GuardNet; -using Microsoft.Azure.Management.Monitor.Fluent.Models; -using Microsoft.Azure.Management.ResourceManager.Fluent.Core; -using Promitor.Core.Metrics.Exceptions; namespace Promitor.Core.Metrics { diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 634e76f27..84146fd84 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ 
b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -104,7 +104,7 @@ public async Task> QueryMetricAsync(string metricName, List return ProcessMetricResult(metricResult, metricName, startQueryingTime, closestAggregationInterval, aggregationType, metricDimensions); } - public async Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, + public async Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, ListresourceIds, string metricFilter = null, int? metricLimit = null) { Guard.NotNullOrWhitespace(metricName, nameof(metricName)); @@ -132,8 +132,8 @@ public async Task> BatchQueryMetricAsync(string metricName, //TODO: This is potentially a lot of results to process in a single thread. Think of ways to utilize additional parallelism return metricResultsList - .Select(metricResult => ProcessMetricResult(metricResult, metricName, startQueryingTime, closestAggregationInterval, aggregationType, metricDimensions)) - .SelectMany(measureMetricsList => measureMetricsList) + .SelectMany(metricResult => ProcessMetricResult(metricResult, metricName, startQueryingTime, closestAggregationInterval, aggregationType, metricDimensions) + .Select(measuredMetric => measuredMetric.WithResourceIdAssociation(metricResult.ParseResourceIdFromResultId()))) .ToList(); } @@ -142,7 +142,6 @@ public async Task> BatchQueryMetricAsync(string metricName, /// private List ProcessMetricResult(MetricResult metricResult, string metricName, DateTime startQueryingTime, TimeSpan closestAggregationInterval, PromitorMetricAggregationType aggregationType, List metricDimensions) { - metricResult.Id var seriesForMetric = metricResult.TimeSeries; if (seriesForMetric.Count < 1) { diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs 
b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs new file mode 100644 index 000000000..9ce7c3383 --- /dev/null +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -0,0 +1,26 @@ +using System; +using System.Text.RegularExpressions; +using Azure.Monitor.Query.Models; + +namespace Promitor.Integrations.AzureMonitor.Extensions +{ + public static class MetricResultExtension + { + // hacky to to get resource ID since it's not available directly through the SDK model + static string resourceIdPattern = @"^(/subscriptions/[^/]+/resourceGroups/[^/]+/providers/[^/]+/[^/]+/[^/]+)"; + static Regex resourceIdRegex = new Regex(resourceIdPattern, RegexOptions.Compiled); + + public static string ParseResourceIdFromResultId(this MetricResult metricResult) + { + Match match = resourceIdRegex.Match(metricResult.Id); + if (!match.Success || string.IsNullOrEmpty(match.Groups[1].Value)) + { + throw new InvalidOperationException("The expected resource ID pattern was not found in the input string."); + } + + string resourceId = match.Groups[1].Value; + return resourceId; + } + + } +} \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs b/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs index 69b1a7293..e1a742251 100644 --- a/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/IAzureMonitorClient.cs @@ -10,7 +10,7 @@ public interface IAzureMonitorClient public Task> QueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, string resourceId, string metricFilter = null, int? 
metricLimit = null); - public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, + public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, ListresourceIds, string metricFilter = null, int? metricLimit = null); } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs b/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs index 1eed77eec..f1da8b47d 100644 --- a/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/LegacyAzureMonitorClient.cs @@ -65,7 +65,7 @@ public LegacyAzureMonitorClient(AzureEnvironment azureCloud, string tenantId, st _authenticatedAzureSubscription = CreateLegacyAzureClient(azureCloud, tenantId, subscriptionId, azureAuthenticationInfo, loggerFactory, metricSinkWriter, azureScrapingSystemMetricsPublisher, azureMonitorLoggingConfiguration); } - public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, + public Task> BatchQueryMetricAsync(string metricName, List metricDimensions, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval, ListresourceIds, string metricFilter = null, int? metricLimit = null) { throw new NotSupportedException("Legacy SDK does not support batch queries. 
Consider migrating to the new Azure.Monitor SDK instead"); From 10f15990db9c68fede8f8c462beda8c854680972 Mon Sep 17 00:00:00 2001 From: xchen Date: Sun, 4 Aug 2024 15:55:16 -0700 Subject: [PATCH 019/131] cache resource info and use cache for hydration --- .../AzureMonitorScraper.cs | 45 +++++++++++++------ .../ResourceAssociatedMeasuredMetric.cs | 1 + 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 5e4a14253..af33df8b4 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -1,10 +1,13 @@ using System; +using System.Collections.Concurrent; using System.Collections.Generic; +using System.Collections.Immutable; using System.Linq; using System.Threading.Tasks; using GuardNet; using Microsoft.Extensions.Logging; using Promitor.Core.Contracts; +using Promitor.Core.Extensions; using Promitor.Core.Metrics; using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Scraping.Configuration.Model.Metrics; @@ -18,13 +21,19 @@ namespace Promitor.Core.Scraping /// Type of metric definition that is being used public abstract class AzureMonitorScraper : Scraper where TResourceDefinition : class, IAzureResourceDefinition - { + { + /// + /// A cache to store resource definitions. 
Used to hydrate resource info from resource ID, when processing batch query results + /// + private readonly ConcurrentDictionary _resourceDefinitions; // using a dictionary for now since IMemoryCache involves layers of injection + /// /// Constructor /// protected AzureMonitorScraper(ScraperConfiguration scraperConfiguration) : base(scraperConfiguration) { + _resourceDefinitions = new ConcurrentDictionary(); } /// @@ -87,17 +96,22 @@ protected override async Task> BatchScrapeResourceAsync(strin { var resourceUri = BuildResourceUri(subscriptionId, scrapeDefinition, (TResourceDefinition) scrapeDefinition.Resource); resourceUriList.Add(resourceUri); + // cache resource info + if (!_resourceDefinitions.ContainsKey(resourceUri)) + { + _resourceDefinitions.TryAdd(resourceUri, scrapeDefinition.Resource); + } } var metricFilter = DetermineMetricFilter(metricName, (TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); var metricLimit = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.Limit; var dimensionNames = DetermineMetricDimensions(metricName, (TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration); // TODO: resource definition doesn't seem to be used, can we remove it from function signature? 
- var measuredMetrics = new List(); + var resourceIdTaggedMeasuredMetrics = new List(); try { // Query Azure Monitor for metrics - measuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, metricFilter, metricLimit); + resourceIdTaggedMeasuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, metricFilter, metricLimit); } catch (MetricInformationNotFoundException metricsNotFoundException) { @@ -106,17 +120,22 @@ protected override async Task> BatchScrapeResourceAsync(strin var measuredMetric = dimensionNames.Any() ? MeasuredMetric.CreateForDimensions(dimensionNames) : MeasuredMetric.CreateWithoutDimensions(null); - measuredMetrics.Add(measuredMetric); + resourceIdTaggedMeasuredMetrics.Add(measuredMetric.WithResourceIdAssociation(null)); } - // Provide more metric labels, if we need to - var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); - - // Enrich measured metrics, in case we need to - var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, measuredMetrics); + var scrapeResults = new List(); + // group based on resource, then to enrichment per group + var groupedMeasuredMetrics = resourceIdTaggedMeasuredMetrics.GroupBy(measureMetric => measureMetric.ResourceId); + foreach (List resourceMetrics in groupedMeasuredMetrics) + { + var resourceId = resourceMetrics[0].ResourceId; + _resourceDefinitions.TryGetValue(resourceId, out IAzureResourceDefinition resourceDefinition); + var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); + var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, 
resourceMetrics.ToImmutableList()); + scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); + } - // We're done! - return new ScrapeResult(subscriptionId, batchScrapeDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceUri, finalMetricValues, metricLabels); + return scrapeResults; } private int? DetermineMetricLimit(ScrapeDefinition scrapeDefinition) @@ -135,9 +154,9 @@ protected override async Task> BatchScrapeResourceAsync(strin /// List of names of the specified dimensions provided by the scraper. /// Measured metric values that were found /// - protected virtual List EnrichMeasuredMetrics(TResourceDefinition resourceDefinition, List dimensionNames, List metricValues) + protected virtual List EnrichMeasuredMetrics(TResourceDefinition resourceDefinition, List dimensionNames, IReadOnlyList metricValues) { - return metricValues; + return metricValues.ToList(); } /// diff --git a/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs b/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs index 57fc5f0c9..c2bf88895 100644 --- a/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs +++ b/src/Promitor.Core/Metrics/ResourceAssociatedMeasuredMetric.cs @@ -12,6 +12,7 @@ public class ResourceAssociatedMeasuredMetric : MeasuredMetric /// resourceId associated with this metric /// public string ResourceId { get; } + public ResourceAssociatedMeasuredMetric(double? 
value, string resourceId) : base(value) { From a5966fa30bc6e9b027b3d118a5ccd26de10ae9df Mon Sep 17 00:00:00 2001 From: xchen Date: Sun, 4 Aug 2024 16:26:33 -0700 Subject: [PATCH 020/131] obtain testable build --- .../Model/AzureMetricConfiguration.cs | 3 +- .../Model/Metrics/ScrapeDefinitionBatch.cs | 4 +- .../LogAnalyticsScraper.cs | 2 +- .../ResourceTypes/AzureMessagingScraper.cs | 4 +- .../ResourceTypes/DataShareScraper.cs | 5 +- src/Promitor.Core.Scraping/Scraper.cs | 49 +++++++++++++------ .../OpenTelemetrySystemMetricsSink.cs | 5 ++ 7 files changed, 50 insertions(+), 22 deletions(-) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs index b513cfabc..2ac638726 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Text; namespace Promitor.Core.Scraping.Configuration.Model { @@ -50,7 +51,7 @@ public class AzureMetricConfiguration public string ToUniqueStringRepresentation() { StringBuilder sb = new StringBuilder(); - sb.Append(Name); + sb.Append(MetricName); foreach (var dimension in Dimensions) { diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index c8ab95a03..48584c6a3 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -83,7 +83,7 @@ public override int GetHashCode() /// private string BuildBatchHashKey() { - return string.Join("_", [List.ofAzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString]); + return 
string.Join("_", new List{AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString()}); } /// @@ -95,7 +95,7 @@ public bool Equals(ScrapeDefinitionBatchProperties obj) return false; ScrapeDefinitionBatchProperties other = (ScrapeDefinitionBatchProperties)obj; - return this.ResourceType == other.ResourceType && this.AzureMetricConfiguration.ToUniqueStringRepresentation() == other.ToUniqueStringRepresentation() && this.SubscriptionId == other.SubscriptionId && this.AggregationInterval.Equals(other.AggregationDeserializer); + return this.ResourceType == other.ResourceType && this.AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && this.SubscriptionId == other.SubscriptionId && this.AggregationInterval.Equals(other.AggregationInterval); } } diff --git a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs index bee859da7..fe8d8d1a9 100644 --- a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs +++ b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs @@ -50,7 +50,7 @@ private Dictionary DetermineMetricLabels(LogAnalyticsResourceDef return new Dictionary { { "workspace_id", resourceDefinition.WorkspaceId }, {"workspace_name", resourceDefinition.WorkspaceName} }; } - protected override Task BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) + protected override Task> BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) { throw new NotImplementedException("Batch scraping not yet implemented for log analytics"); } diff --git a/src/Promitor.Core.Scraping/ResourceTypes/AzureMessagingScraper.cs 
b/src/Promitor.Core.Scraping/ResourceTypes/AzureMessagingScraper.cs index 88bb8acac..02ec91cc4 100644 --- a/src/Promitor.Core.Scraping/ResourceTypes/AzureMessagingScraper.cs +++ b/src/Promitor.Core.Scraping/ResourceTypes/AzureMessagingScraper.cs @@ -16,7 +16,7 @@ protected AzureMessagingScraper(ScraperConfiguration scraperConfiguration) { } - protected override List EnrichMeasuredMetrics(TResourceDefinition resourceDefinition, List dimensionNames, List metricValues) + protected override List EnrichMeasuredMetrics(TResourceDefinition resourceDefinition, List dimensionNames, IReadOnlyList metricValues) { // Change Azure Monitor Dimension name to more representable value foreach (var measuredMetric in metricValues.Where(metricValue => metricValue.Dimensions.Any())) @@ -24,7 +24,7 @@ protected override List EnrichMeasuredMetrics(TResourceDefinitio measuredMetric.Dimensions[0].Name = EntityNameLabel; } - return metricValues; + return metricValues.ToList(); } protected override Dictionary DetermineMetricLabels(TResourceDefinition resourceDefinition) diff --git a/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs b/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs index c5b26abbf..4cc5bdcab 100644 --- a/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs +++ b/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Collections.Immutable; using System.Linq; using Microsoft.Extensions.Logging; using Promitor.Core.Contracts; @@ -51,7 +52,7 @@ protected override List DetermineMetricDimensions(string metricName, Dat return new List { dimensionName }; } - protected override List EnrichMeasuredMetrics(DataShareResourceDefinition resourceDefinition, List dimensionNames, List metricValues) + protected override List EnrichMeasuredMetrics(DataShareResourceDefinition resourceDefinition, List dimensionNames, IReadOnlyList metricValues) { // Change Azure Monitor dimension 
name to more representable value foreach (var dimension in metricValues.SelectMany(measuredMetric => measuredMetric.Dimensions.Where(dimension => (dimension.Name == "ShareName" || dimension.Name == "ShareSubscriptionName")))) @@ -59,7 +60,7 @@ protected override List EnrichMeasuredMetrics(DataShareResourceD dimension.Name = "share_name"; } - return metricValues; + return metricValues.ToList(); } private static string GetMetricFilterFieldName(string metricName) diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index 762dcd8ff..f3f29fec7 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -122,43 +122,42 @@ public async Task BatchScrapeAsync(BatchScrapeDefinition scrapeDefinition, bool isSuccessful, int batchSize = 0) + private async Task ReportScrapingOutcomeAsync(ScrapeDefinition scrapeDefinition, bool isSuccessful) { // We reset all values, by default double successfulMetricValue = 0; @@ -183,14 +182,36 @@ private async Task ReportScrapingOutcomeAsync(ScrapeDefinition 0) + + // Report! 
+ await AzureScrapingSystemMetricsPublisher.WriteGaugeMeasurementAsync(RuntimeMetricNames.ScrapeSuccessful, ScrapeSuccessfulMetricDescription, successfulMetricValue, labels); + await AzureScrapingSystemMetricsPublisher.WriteGaugeMeasurementAsync(RuntimeMetricNames.ScrapeError, ScrapeErrorMetricDescription, unsuccessfulMetricValue, labels); + } + + private async Task ReportBatchScrapingOutcomeAsync(BatchScrapeDefinition batchScrapeDefinition, bool isSuccessful, int batchSize) + { + // We reset all values, by default + double successfulMetricValue = 0; + double unsuccessfulMetricValue = 0; + + // Based on the result, we reflect that in the metric + if (isSuccessful) { - await AzureScrapingSystemMetricsPublisher.WriteHistogramMeasurementAsync(RuntimeMetricNames.BatchSize, BatvhSizeMetricDescription, batchSize, labels); - labels.Add("is_batch", "1"); - } else { - labels.Add("is_batch", "0"); + successfulMetricValue = 1; + } + else + { + unsuccessfulMetricValue = 1; } + // Enrich with context + var labels = new Dictionary + { + {"metric_name", batchScrapeDefinition.ScrapeDefinitionBatchProperties.PrometheusMetricDefinition.Name}, + {"resource_type", batchScrapeDefinition.ScrapeDefinitionBatchProperties.ResourceType.ToString()}, + {"subscription_id", batchScrapeDefinition.ScrapeDefinitionBatchProperties.SubscriptionId}, + {"is_batch", "1"}, + }; // Report! 
await AzureScrapingSystemMetricsPublisher.WriteGaugeMeasurementAsync(RuntimeMetricNames.ScrapeSuccessful, ScrapeSuccessfulMetricDescription, successfulMetricValue, labels); await AzureScrapingSystemMetricsPublisher.WriteGaugeMeasurementAsync(RuntimeMetricNames.ScrapeError, ScrapeErrorMetricDescription, unsuccessfulMetricValue, labels); diff --git a/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs index b446aa786..edc484276 100644 --- a/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs @@ -20,5 +20,10 @@ public async Task WriteGaugeMeasurementAsync(string name, string description, do { await _metricSink.ReportMetricAsync(name, description, value, labels); } + + public Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) + { + throw new System.NotImplementedException("Histogram measurement not yet supported in OpenTelemetry sink"); + } } } From 83761b265d8fc515a66252bc0c0b63303b3ef23c Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 8 Aug 2024 15:57:37 -0700 Subject: [PATCH 021/131] unit test batch property hashcode and equality implementations --- src/Promitor.Agents.Scraper/Docs/Open-Api.xml | 14 ++ .../Model/AzureMetricConfiguration.cs | 11 +- .../Model/Metrics/ScrapeDefinitionBatch.cs | 6 +- .../ScrapeDefinitionBatchPropertiesTest.cs | 130 ++++++++++++++++++ 4 files changed, 153 insertions(+), 8 deletions(-) create mode 100644 src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs diff --git a/src/Promitor.Agents.Scraper/Docs/Open-Api.xml b/src/Promitor.Agents.Scraper/Docs/Open-Api.xml index 7299b25fa..20ff78364 100644 --- a/src/Promitor.Agents.Scraper/Docs/Open-Api.xml +++ 
b/src/Promitor.Agents.Scraper/Docs/Open-Api.xml @@ -269,6 +269,20 @@ means to obtain a logger logger to use for scraping detail + + + groups scrape definitions based on following conditions: + 1. Definitions in a batch must target the same resource type + 2. Definitions in a batch must target the same Azure metric with identical dimensions + 3. Definitions in a batch must have the same time granularity + 4. Batch size cannot exceed configured maximum + + + + + splits the "raw" batch according to max batch size configured + + Run some task work in the thread pool, but only allow a limited number of threads to go at a time diff --git a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs index 2ac638726..bd4d8654f 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs @@ -52,11 +52,12 @@ public string ToUniqueStringRepresentation() { StringBuilder sb = new StringBuilder(); sb.Append(MetricName); - - foreach (var dimension in Dimensions) - { - sb.Append("_"); - sb.Append(dimension.ToString()); + if (Dimensions != null) { + foreach (var dimension in Dimensions) + { + sb.Append("_"); + sb.Append(dimension.ToString()); + } } return sb.ToString(); diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index 48584c6a3..5ee57ce22 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -83,7 +83,7 @@ public override int GetHashCode() /// private string BuildBatchHashKey() { - return string.Join("_", new List{AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), 
AggregationInterval.ToString()}); + return string.Join("_", new List {AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString()}); } /// @@ -94,8 +94,8 @@ public bool Equals(ScrapeDefinitionBatchProperties obj) if (obj == null || !(obj is ScrapeDefinitionBatchProperties)) return false; - ScrapeDefinitionBatchProperties other = (ScrapeDefinitionBatchProperties)obj; - return this.ResourceType == other.ResourceType && this.AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && this.SubscriptionId == other.SubscriptionId && this.AggregationInterval.Equals(other.AggregationInterval); + ScrapeDefinitionBatchProperties other = obj; + return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && AggregationInterval.Equals(other.AggregationInterval); } } diff --git a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs new file mode 100644 index 000000000..0a77a63cc --- /dev/null +++ b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs @@ -0,0 +1,130 @@ +using System; +using System.Collections.Generic; +using System.ComponentModel; +using System.IO; +using System.Runtime.Serialization.Formatters.Binary; +using AutoMapper; +using Bogus.DataSets; +using Promitor.Core.Metrics; +using Promitor.Core.Scraping.Configuration.Model; +using Promitor.Core.Scraping.Configuration.Model.Metrics; +using Promitor.Core.Scraping.Configuration.Serialization.v1.Mapping; +using Promitor.Core.Scraping.Configuration.Serialization.v1.Model; +using Xunit; + +namespace Promitor.Tests.Unit.Core.Metrics +{ + [Category("Unit")] + public class ScrapeDefinitionBatchPropertiesTest + { + private 
readonly IMapper _mapper; // to model instantiation happen + private readonly static string _azureMetricNameBase = "promitor_batch_test_metric"; + private readonly static PrometheusMetricDefinition _prometheusMetricDefinition = + new("promitor_batch_test", "test", new Dictionary()); + private readonly static string _subscriptionId = "subscription"; + private readonly static AzureMetricConfigurationV1 _azureMetricConfigurationBase = new AzureMetricConfigurationV1 + { + MetricName = _azureMetricNameBase, + Aggregation = new MetricAggregationV1 + { + Type = PromitorMetricAggregationType.Average + }, + }; + + public ScrapeDefinitionBatchPropertiesTest() + { + var config = new MapperConfiguration(c => c.AddProfile()); + _mapper = config.CreateMapper(); + } + + [Fact] + public void BuildBatchHashKeySameResultNoDimensions() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping + { + Schedule = "5 4 3 2 1" + }; + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = batchProperties2.GetHashCode(); + Assert.Equal(hashCode1, hashCode2); + } + + [Fact] + public void BuildBatchHashKeySameResultIdenticalDimensions() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + azureMetricConfiguration.Dimensions = [new MetricDimension{Name = "Dimension1"}, new 
MetricDimension{Name = "Dimension2"}]; + + var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping + { + Schedule = "5 4 3 2 1" + }; + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = batchProperties2.GetHashCode(); + Assert.Equal(hashCode1, hashCode2); + } + + [Fact] + public void BuildBatchHashKeyDifferentResultDifferentDimensions() + { + var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationBase); + azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; + var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationBase); + azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "DiffDimension1"}, new MetricDimension{Name = "DiffDimension2"}]; + + + var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping + { + Schedule = "5 4 3 2 1" + }; + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: 
Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = batchProperties2.GetHashCode(); + Assert.NotEqual(hashCode1, hashCode2); + } + + [Fact] + public void BuildBatchHashKeyDifferentResultDifferentSubscription() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + + var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping + { + Schedule = "5 4 3 2 1" + }; + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: "subscription2"); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = batchProperties2.GetHashCode(); + Assert.NotEqual(hashCode1, hashCode2); + } + + [Fact] + public void BuildBatchHashKeyDifferentResultDifferentResourceType() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + + var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping + { + Schedule = "5 4 3 2 1" + }; + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: 
Promitor.Core.Contracts.ResourceType.LoadBalancer, scraping: scraping, subscriptionId: "subscription2"); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = batchProperties2.GetHashCode(); + Assert.NotEqual(hashCode1, hashCode2); + } + + } +} \ No newline at end of file From 94120b5b139e80de7b10990b5f50a1b8ebaef9c6 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 8 Aug 2024 16:35:30 -0700 Subject: [PATCH 022/131] fix bug in MetricConfiguration to unique string implementation --- .../Model/AzureMetricConfiguration.cs | 4 +- .../ScrapeDefinitionBatchPropertiesTest.cs | 67 +++++++++++++------ 2 files changed, 48 insertions(+), 23 deletions(-) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs index bd4d8654f..ad06bd44b 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs @@ -53,10 +53,10 @@ public string ToUniqueStringRepresentation() StringBuilder sb = new StringBuilder(); sb.Append(MetricName); if (Dimensions != null) { - foreach (var dimension in Dimensions) + foreach (var dimension in Dimensions) { sb.Append("_"); - sb.Append(dimension.ToString()); + sb.Append(dimension.Name); } } diff --git a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs index 0a77a63cc..bac524edb 100644 --- a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs +++ b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs @@ -5,6 +5,7 @@ using System.Runtime.Serialization.Formatters.Binary; using AutoMapper; using Bogus.DataSets; +using Microsoft.AspNetCore.Mvc.ModelBinding; using Promitor.Core.Metrics; using Promitor.Core.Scraping.Configuration.Model; using 
Promitor.Core.Scraping.Configuration.Model.Metrics; @@ -30,6 +31,10 @@ public class ScrapeDefinitionBatchPropertiesTest Type = PromitorMetricAggregationType.Average }, }; + private readonly static ScrapingV1 _scrapingBase = new ScrapingV1 + { + Schedule = "5 4 3 2 1" + }; public ScrapeDefinitionBatchPropertiesTest() { @@ -41,10 +46,7 @@ public ScrapeDefinitionBatchPropertiesTest() public void BuildBatchHashKeySameResultNoDimensions() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping - { - Schedule = "5 4 3 2 1" - }; + var scraping = _mapper.Map(_scrapingBase); var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); @@ -59,10 +61,8 @@ public void BuildBatchHashKeySameResultIdenticalDimensions() var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); azureMetricConfiguration.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; - var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping - { - Schedule = "5 4 3 2 1" - }; + var scraping = _mapper.Map(_scrapingBase); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var batchProperties2 = new 
ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); @@ -77,13 +77,29 @@ public void BuildBatchHashKeyDifferentResultDifferentDimensions() var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationBase); azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationBase); - azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "DiffDimension1"}, new MetricDimension{Name = "DiffDimension2"}]; + azureMetricConfiguration2.Dimensions = [new MetricDimension{Name = "DiffDimension1"}, new MetricDimension{Name = "DiffDimension2"}]; + var scraping = _mapper.Map(_scrapingBase); + + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = batchProperties2.GetHashCode(); + Assert.NotEqual(hashCode1, hashCode2); + } + + [Fact] + public void BuildBatchHashKeyDifferentResultDifferentMetricName() + { + var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationBase); + azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; + var azureMetricConfiguration2 = 
_mapper.Map(_azureMetricConfigurationBase); + azureMetricConfiguration2.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; + azureMetricConfiguration2.MetricName = "diffName"; + + var scraping = _mapper.Map(_scrapingBase); - var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping - { - Schedule = "5 4 3 2 1" - }; var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); @@ -96,11 +112,8 @@ public void BuildBatchHashKeyDifferentResultDifferentDimensions() public void BuildBatchHashKeyDifferentResultDifferentSubscription() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); - var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping - { - Schedule = "5 4 3 2 1" - }; var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: "subscription2"); @@ -113,11 +126,8 @@ public void 
BuildBatchHashKeyDifferentResultDifferentSubscription() public void BuildBatchHashKeyDifferentResultDifferentResourceType() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); - var scraping = new Promitor.Core.Scraping.Configuration.Model.Scraping - { - Schedule = "5 4 3 2 1" - }; var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.LoadBalancer, scraping: scraping, subscriptionId: "subscription2"); @@ -126,5 +136,20 @@ public void BuildBatchHashKeyDifferentResultDifferentResourceType() Assert.NotEqual(hashCode1, hashCode2); } + [Fact] + public void BuildBatchHashKeyDifferentResultDifferentSchedule() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping1 = _mapper.Map(_scrapingBase); + var scraping2 = _mapper.Map(_scrapingBase); + scraping2.Schedule = "6 4 3 2 1"; + + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping1, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping2, subscriptionId: "subscription2"); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = 
batchProperties2.GetHashCode(); + Assert.NotEqual(hashCode1, hashCode2); + } } } \ No newline at end of file From ca59552a4735106597b1851ab3cdc718a3511bb9 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 8 Aug 2024 16:54:37 -0700 Subject: [PATCH 023/131] move resource batching routines as separate static class --- .../Scheduling/ResourcesScrapingJob.cs | 33 +--------------- .../AzureResourceDefinitionBatching.cs | 39 +++++++++++++++++++ 2 files changed, 41 insertions(+), 31 deletions(-) create mode 100644 src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index dff22afe7..1adda9cae 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -15,6 +15,7 @@ using Promitor.Core.Extensions; using Promitor.Core.Metrics.Interfaces; using Promitor.Core.Metrics.Sinks; +using Promitor.Core.Scraping.Batching; using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Scraping.Configuration.Model.Metrics; using Promitor.Core.Scraping.Factories; @@ -255,7 +256,7 @@ private async Task ScrapeMetrics(IEnumerable(); var batchScrapingEnabled = this._metricsDeclaration.MetricBatchConfig?.Enabled ?? 
false; if (batchScrapingEnabled) { - var batchScrapeDefinitions = GroupScrapeDefinitions(scrapeDefinitions, this._metricsDeclaration.MetricBatchConfig.MaxBatchSize, cancellationToken); + var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._metricsDeclaration.MetricBatchConfig.MaxBatchSize, cancellationToken); foreach(var batchScrapeDefinition in batchScrapeDefinitions) { var azureMetricName = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.MetricName; @@ -330,36 +331,6 @@ private async Task ScrapeMetric(ScrapeDefinition scrap scrapeDefinition.PrometheusMetricDefinition.Name, scrapeDefinition.Resource.ResourceName); } } - - /// - /// groups scrape definitions based on following conditions: - /// 1. Definitions in a batch must target the same resource type - /// 2. Definitions in a batch must target the same Azure metric with identical dimensions - /// 3. Definitions in a batch must have the same time granularity - /// 4. 
Batch size cannot exceed configured maximum - /// - private List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) - { - - return allScrapeDefinitions.GroupBy(def => def.buildPropertiesForBatch()) - .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max - .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches - .SelectMany(group => group.Value.Select(batch => new BatchScrapeDefinition(group.Key, batch))) - .ToList(); // flatten - } - - /// - /// splits the "raw" batch according to max batch size configured - /// - private List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize, CancellationToken cancellationToken) - { - int numNewGroups = (batchToSplit.Count - 1) / 50 + 1; - - return Enumerable.Range(0, numNewGroups) - .Select(i => batchToSplit.Skip(i * 50).Take(50).ToList()) - .ToList(); - } - /// /// Run some task work in the thread pool, but only allow a limited number of threads to go at a time diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs new file mode 100644 index 000000000..f3eb70c8f --- /dev/null +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -0,0 +1,39 @@ +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using Promitor.Core.Contracts; +using Promitor.Core.Scraping.Configuration.Model.Metrics; + +namespace Promitor.Core.Scraping.Batching +{ + public static class AzureResourceDefinitionBatching + { + /// + /// groups scrape definitions based on following conditions: + /// 1. Definitions in a batch must target the same resource type + /// 2. Definitions in a batch must target the same Azure metric with identical dimensions + /// 3. 
Definitions in a batch must have the same time granularity + /// 4. Batch size cannot exceed configured maximum + /// + public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) + { + return allScrapeDefinitions.GroupBy(def => def.buildPropertiesForBatch()) + .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max + .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches + .SelectMany(group => group.Value.Select(batch => new BatchScrapeDefinition(group.Key, batch))) + .ToList(); // flatten + } + + /// + /// splits the "raw" batch according to max batch size configured + /// + private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize, CancellationToken cancellationToken) + { + int numNewGroups = (batchToSplit.Count - 1) / 50 + 1; + + return Enumerable.Range(0, numNewGroups) + .Select(i => batchToSplit.Skip(i * 50).Take(50).ToList()) + .ToList(); + } + } +} \ No newline at end of file From 106bfbd6d8860f78fad0ac63e9222670a43827e6 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 8 Aug 2024 23:17:49 -0700 Subject: [PATCH 024/131] unit test scrape definition batching logic --- src/Promitor.Agents.Scraper/Docs/Open-Api.xml | 14 -- .../AzureResourceDefinitionBatchingTests.cs | 146 ++++++++++++++++++ 2 files changed, 146 insertions(+), 14 deletions(-) create mode 100644 src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs diff --git a/src/Promitor.Agents.Scraper/Docs/Open-Api.xml b/src/Promitor.Agents.Scraper/Docs/Open-Api.xml index 20ff78364..7299b25fa 100644 --- a/src/Promitor.Agents.Scraper/Docs/Open-Api.xml +++ b/src/Promitor.Agents.Scraper/Docs/Open-Api.xml @@ -269,20 +269,6 @@ means to obtain a logger logger to use for scraping detail - - - groups scrape definitions based on following 
conditions: - 1. Definitions in a batch must target the same resource type - 2. Definitions in a batch must target the same Azure metric with identical dimensions - 3. Definitions in a batch must have the same time granularity - 4. Batch size cannot exceed configured maximum - - - - - splits the "raw" batch according to max batch size configured - - Run some task work in the thread pool, but only allow a limited number of threads to go at a time diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs new file mode 100644 index 000000000..3529cb3ae --- /dev/null +++ b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -0,0 +1,146 @@ +using System.Collections.Generic; +using System.ComponentModel; +using System.Threading; +using AutoMapper; +using Promitor.Core.Contracts; +using Promitor.Core.Metrics; +using Promitor.Core.Scraping.Batching; +using Promitor.Core.Scraping.Configuration.Model; +using Promitor.Core.Scraping.Configuration.Model.Metrics; +using Promitor.Core.Scraping.Configuration.Serialization.v1.Mapping; +using Promitor.Core.Scraping.Configuration.Serialization.v1.Model; +using Xunit; + +namespace Promitor.Tests.Unit.Core.Metrics +{ + [Category("Unit")] + public class AzureResourceDefinitionBatchingTests + { + private readonly IMapper _mapper; // to model instantiation happen + private readonly static string _azureMetricNameBase = "promitor_batch_test_metric"; + private readonly static PrometheusMetricDefinition _prometheusMetricDefinition = + new("promitor_batch_test", "test", new Dictionary()); + private readonly static string _subscriptionId = "subscription"; + private readonly static AzureMetricConfigurationV1 _azureMetricConfigurationBase = new AzureMetricConfigurationV1 + { + MetricName = _azureMetricNameBase, + Aggregation = new MetricAggregationV1 + { + Type = 
PromitorMetricAggregationType.Average + }, + }; + private readonly static ScrapingV1 _scrapingBase = new ScrapingV1 + { + Schedule = "5 4 3 2 1" + }; + private readonly static string _resourceGroupName = "batch_test_group"; + private readonly static int _batchSize = 50; + + public AzureResourceDefinitionBatchingTests() + { + var config = new MapperConfiguration(c => c.AddProfile()); + _mapper = config.CreateMapper(); + } + + [Fact] + public void IdenticalBatchPropertiesShouldBatchTogether() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var scrapeDefinitions = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + ); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: _batchSize, CancellationToken.None); + // expect one batch of 10 + Assert.Single(groupedScrapeDefinitions); + Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); + } + + [Fact] + public void BatchShouldSplitAccordingToConfiguredBatchSize() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var scrapeDefinitions = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 130 + ); + var 
groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: _batchSize, CancellationToken.None); + // expect three batches adding up to total size + Assert.Equal(3, groupedScrapeDefinitions.Count); + Assert.Equal(130, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); + } + + [Fact] + public void DifferentBatchPropertiesShouldBatchSeparately() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var scrapeDefinitions = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + ); + var differentScrapeDefinitions = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + ); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. 
differentScrapeDefinitions], maxBatchSize: _batchSize, CancellationToken.None); + // expect two batch of 10 each + Assert.Equal(2, groupedScrapeDefinitions.Count); + Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); + Assert.Equal(10, groupedScrapeDefinitions[1].ScrapeDefinitions.Count); + } + + [Fact] + public void MixedBatchShouldSplitAccordingToConfiguredBatchSize() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var scrapeDefinitions = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 130 + ); + var differentScrapeDefinitions = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 120 + ); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. 
differentScrapeDefinitions], maxBatchSize: _batchSize, CancellationToken.None); + // expect two batch of 10 each + Assert.Equal(6, groupedScrapeDefinitions.Count); + Assert.Equal(250, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); + } + + private static List> BuildScrapeDefinitionBatch( + AzureMetricConfiguration azureMetricConfiguration, + LogAnalyticsConfiguration logAnalyticsConfiguration, + PrometheusMetricDefinition prometheusMetricDefinition, + Promitor.Core.Scraping.Configuration.Model.Scraping scraping, + ResourceType resourceType, + string subscriptionId, + string resourceGroupName, + int size) + { + // builds a batch of scrape definitions of specified size, each sharing properties passed in through function parameters + var batch = new List>(); + for (var resoureceIdCounter = 0; resoureceIdCounter < size; resoureceIdCounter++) + { + var resourceName = "resource" + resoureceIdCounter.ToString(); + var resourceDefinition = new AzureResourceDefinition(resourceType, subscriptionId, resourceGroupName, resourceName: resourceName, uniqueName: resourceName); + batch.Add(new ScrapeDefinition(azureMetricConfiguration, logAnalyticsConfiguration, prometheusMetricDefinition, scraping, resourceDefinition, subscriptionId, resourceGroupName)); + } + return batch; + } + + private static int CountTotalScrapeDefinitions(List> groupedScrapeDefinitions) + { + var count = 0; + groupedScrapeDefinitions.ForEach(batch => count += batch.ScrapeDefinitions.Count); + return count; + } + } +} \ No newline at end of file From cad075230566f476f306660c2166eeedfda3e967 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 13:43:36 -0700 Subject: [PATCH 025/131] Move batching to runtime configuration --- .../Scheduling/ResourcesScrapingJob.cs | 4 +- .../Model/MetricBatchScrapeConfig.cs | 8 ---- .../Configuration/Model/MetricsDeclaration.cs | 1 - .../v1/Mapping/V1MappingProfile.cs | 1 - .../v1/Model/MetricBatchScrapeConfigV1.cs | 21 ----------- 
.../v1/Model/MetricsDeclarationV1.cs | 1 - .../AzureMonitorIntegrationConfiguration.cs | 1 + .../AzureMonitorMetricBatchScrapeConfig.cs | 8 ++++ .../RuntimeConfigurationUnitTest.cs | 37 +++++++++++++++++++ .../Config/RuntimeConfigurationGenerator.cs | 15 +++++++- 10 files changed, 62 insertions(+), 35 deletions(-) delete mode 100644 src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs delete mode 100644 src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs create mode 100644 src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 1adda9cae..2dab98986 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -254,9 +254,9 @@ private void GetResourceScrapeDefinition(IAzureResourceDefinition resourceDefini private async Task ScrapeMetrics(IEnumerable> scrapeDefinitions, CancellationToken cancellationToken) { var tasks = new List(); - var batchScrapingEnabled = this._metricsDeclaration.MetricBatchConfig?.Enabled ?? false; + var batchScrapingEnabled = this._azureMonitorIntegrationConfiguration.Value.MetricsBatching?.Enabled ?? 
false; if (batchScrapingEnabled) { - var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._metricsDeclaration.MetricBatchConfig.MaxBatchSize, cancellationToken); + var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize, cancellationToken); foreach(var batchScrapeDefinition in batchScrapeDefinitions) { var azureMetricName = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.MetricName; diff --git a/src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs b/src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs deleted file mode 100644 index 9103dde1b..000000000 --- a/src/Promitor.Core.Scraping/Configuration/Model/MetricBatchScrapeConfig.cs +++ /dev/null @@ -1,8 +0,0 @@ -namespace Promitor.Core.Scraping.Configuration.Model -{ - public class MetricBatchScrapeConfig - { - public bool Enabled { get; set; } - public int MaxBatchSize { get; set; } - } -} \ No newline at end of file diff --git a/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs b/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs index b566e0f21..3c820975f 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/MetricsDeclaration.cs @@ -6,7 +6,6 @@ namespace Promitor.Core.Scraping.Configuration.Model public class MetricsDeclaration { public AzureMetadata AzureMetadata { get; set; } - public MetricBatchScrapeConfig? 
MetricBatchConfig { get; set; } public MetricDefaults MetricDefaults { get; set; } = new MetricDefaults(); public List Metrics { get; set; } = new List(); } diff --git a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs index 4d4d57270..9ddea7e2c 100644 --- a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs +++ b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Mapping/V1MappingProfile.cs @@ -14,7 +14,6 @@ public V1MappingProfile() { CreateMap(); CreateMap(); - CreateMap(); CreateMap(); CreateMap(); CreateMap(); diff --git a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs deleted file mode 100644 index 389b4cace..000000000 --- a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricBatchScrapeConfigV1.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using Microsoft.Azure.Management.Monitor.Fluent.Models; - -namespace Promitor.Core.Scraping.Configuration.Serialization.v1.Model -{ - /// - /// Contains settings to scrape metrics in batched API calls - /// - public class MetricBatchScrapeConfigV1 - { - /// - /// Enable batched scraping mode for all metrics in the scraper - /// - public bool Enabled { get; set; } - - /// - /// Maximum number of resources in a batch. 
Azure Monitor API specifies a max limit of 50 as of March 2024 - /// - public int MaxBatchSize { get; set; } - } -} diff --git a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs index df011517f..8f7dae4dd 100644 --- a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs +++ b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs @@ -11,7 +11,6 @@ public class MetricsDeclarationV1 { public string Version { get; set; } = SpecVersion.v1.ToString(); public AzureMetadataV1 AzureMetadata { get; set; } - public MetricBatchScrapeConfigV1? MetricBatchScrapeConfig { get; set; } public MetricDefaultsV1 MetricDefaults { get; set; } public IReadOnlyCollection Metrics { get; set; } } diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorIntegrationConfiguration.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorIntegrationConfiguration.cs index 577f15979..d053cd18d 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorIntegrationConfiguration.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorIntegrationConfiguration.cs @@ -4,5 +4,6 @@ public class AzureMonitorIntegrationConfiguration { public AzureMonitorHistoryConfiguration History { get; set; } = new(); public bool UseAzureMonitorSdk { get; set; } = true; + public AzureMonitorMetricBatchScrapeConfig MetricsBatching { get; set; } = new(); } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs new file mode 100644 index 000000000..7315a04b6 --- /dev/null +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -0,0 +1,8 @@ 
+namespace Promitor.Integrations.AzureMonitor.Configuration +{ + public class AzureMonitorMetricBatchScrapeConfig + { + public bool Enabled { get; set; } + public int MaxBatchSize { get; set; } + } +} \ No newline at end of file diff --git a/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs b/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs index 0fb5a064e..88c63ee8b 100644 --- a/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs +++ b/src/Promitor.Tests.Unit/Configuration/RuntimeConfigurationUnitTest.cs @@ -132,6 +132,43 @@ public async Task RuntimeConfiguration_OverrideNewSdkFlagForAzureMonitorIntegrat Assert.False(runtimeConfiguration.AzureMonitor.Integration.UseAzureMonitorSdk); } + [Fact] + public async Task RuntimeConfiguration_HasNoBatchingConfigurationForAzureMonitorIntegration_DefaultsToDisabled() + { + // Arrange + var configuration = await RuntimeConfigurationGenerator.WithServerConfiguration() + .WithAzureMonitorIntegration(useAzureMonitorSdk: null) + .GenerateAsync(); + + // Act + var runtimeConfiguration = configuration.Get(); + + // Assert + Assert.NotNull(runtimeConfiguration); + Assert.NotNull(runtimeConfiguration.AzureMonitor); + Assert.NotNull(runtimeConfiguration.AzureMonitor.Integration); + Assert.False(runtimeConfiguration.AzureMonitor.Integration.MetricsBatching.Enabled); + } + + [Fact] + public async Task RuntimeConfiguration_BatchingConfigurationForAzureMonitorIntegration_UsesConfigured() + { + // Arrange + var configuration = await RuntimeConfigurationGenerator.WithServerConfiguration() + .WithAzureMonitorIntegration(batchSize: 50) + .GenerateAsync(); + + // Act + var runtimeConfiguration = configuration.Get(); + + // Assert + Assert.NotNull(runtimeConfiguration); + Assert.NotNull(runtimeConfiguration.AzureMonitor); + Assert.NotNull(runtimeConfiguration.AzureMonitor.Integration); + Assert.True(runtimeConfiguration.AzureMonitor.Integration.MetricsBatching.Enabled); + 
Assert.Equal(50, runtimeConfiguration.AzureMonitor.Integration.MetricsBatching.MaxBatchSize); + } + [Theory] [InlineData(true)] diff --git a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs index 0c4751a2b..8c0c41d94 100644 --- a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs +++ b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs @@ -235,7 +235,7 @@ public RuntimeConfigurationGenerator WithAzureMonitorLogging(bool isEnabled = tr return this; } - public RuntimeConfigurationGenerator WithAzureMonitorIntegration(int? startingFromInHours = 100, bool? useAzureMonitorSdk = true) + public RuntimeConfigurationGenerator WithAzureMonitorIntegration(int? startingFromInHours = 100, bool? useAzureMonitorSdk = true, int? batchSize = 0) { _runtimeConfiguration.AzureMonitor ??= new AzureMonitorConfiguration(); _runtimeConfiguration.AzureMonitor.Integration ??= new AzureMonitorIntegrationConfiguration(); @@ -252,6 +252,11 @@ public RuntimeConfigurationGenerator WithAzureMonitorIntegration(int? 
startingFr _runtimeConfiguration.AzureMonitor.Integration.UseAzureMonitorSdk = useAzureMonitorSdk.Value; } + if (batchSize != null) + { + _runtimeConfiguration.AzureMonitor.Integration.MetricsBatching = new AzureMonitorMetricBatchScrapeConfig {Enabled = true, MaxBatchSize = batchSize.Value}; + } + return this; } @@ -344,10 +349,18 @@ public async Task GenerateAsync() if (_runtimeConfiguration?.AzureMonitor.Integration?.History != null) { + // configurationBuilder.AppendLine(" integration:"); + + // configurationBuilder.AppendLine($" useAzureMonitorSdk: {_runtimeConfiguration?.AzureMonitor.Integration.UseAzureMonitorSdk}"); + // configurationBuilder.AppendLine(" history:"); + // configurationBuilder.AppendLine($" startingFromInHours: {_runtimeConfiguration?.AzureMonitor.Integration.History.StartingFromInHours}"); configurationBuilder.AppendLine(" integration:"); configurationBuilder.AppendLine($" useAzureMonitorSdk: {_runtimeConfiguration?.AzureMonitor.Integration.UseAzureMonitorSdk}"); configurationBuilder.AppendLine(" history:"); configurationBuilder.AppendLine($" startingFromInHours: {_runtimeConfiguration?.AzureMonitor.Integration.History.StartingFromInHours}"); + configurationBuilder.AppendLine(" metricsBatching:"); + configurationBuilder.AppendLine($" enabled: {_runtimeConfiguration?.AzureMonitor.Integration.MetricsBatching.Enabled}"); + configurationBuilder.AppendLine($" maxBatchSize: {_runtimeConfiguration?.AzureMonitor.Integration.MetricsBatching.MaxBatchSize}"); } if (_runtimeConfiguration?.AzureMonitor.Logging != null) From 30b535fca98228c8b811a65a43666e6cba8e9857 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 13:55:00 -0700 Subject: [PATCH 026/131] fix test --- .../Generators/Config/RuntimeConfigurationGenerator.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs index 
8c0c41d94..a7ef73637 100644 --- a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs +++ b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs @@ -235,7 +235,7 @@ public RuntimeConfigurationGenerator WithAzureMonitorLogging(bool isEnabled = tr return this; } - public RuntimeConfigurationGenerator WithAzureMonitorIntegration(int? startingFromInHours = 100, bool? useAzureMonitorSdk = true, int? batchSize = 0) + public RuntimeConfigurationGenerator WithAzureMonitorIntegration(int? startingFromInHours = 100, bool? useAzureMonitorSdk = true, int? batchSize = null) { _runtimeConfiguration.AzureMonitor ??= new AzureMonitorConfiguration(); _runtimeConfiguration.AzureMonitor.Integration ??= new AzureMonitorIntegrationConfiguration(); From 963b71a06ee9e0ea4f448a17f65c28a707ce0086 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 14:23:44 -0700 Subject: [PATCH 027/131] configure CI to do batch scraping --- config/promitor/scraper/ci-runtime.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index b8eefbcd5..1182cf074 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -15,6 +15,9 @@ telemetry: verbosity: trace defaultVerbosity: trace azureMonitor: + metricsBatching: + enabled: true + maxBatchSize: 2 logging: isEnabled: false resourceDiscovery: From e1a8365dd31e95b8d7cc2dedcbf4255fd1a224c9 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 14:38:45 -0700 Subject: [PATCH 028/131] add logging --- .../Scheduling/ResourcesScrapingJob.cs | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 2dab98986..1d1c9ad6a 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ 
b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -255,26 +255,27 @@ private async Task ScrapeMetrics(IEnumerable(); var batchScrapingEnabled = this._azureMonitorIntegrationConfiguration.Value.MetricsBatching?.Enabled ?? false; + Logger.LogInformation("Parsed batch config: {Enabled}, {BatchSize}", this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.Enabled, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize); if (batchScrapingEnabled) { var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize, cancellationToken); foreach(var batchScrapeDefinition in batchScrapeDefinitions) { var azureMetricName = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.MetricName; var resourceType = batchScrapeDefinition.ScrapeDefinitionBatchProperties.ResourceType; - Logger.LogInformation("Batch scraping Azure Metric {AzureMetricName} for resource type {ResourceType}.", azureMetricName, resourceType); + Logger.LogInformation("Executing batch scrape job of size {BatchSize} for Azure Metric {AzureMetricName} for resource type {ResourceType}.", batchScrapeDefinition.ScrapeDefinitions.Count, azureMetricName, resourceType); await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } - } - - foreach (var scrapeDefinition in scrapeDefinitions) - { - cancellationToken.ThrowIfCancellationRequested(); + } else { + foreach (var scrapeDefinition in scrapeDefinitions) + { + cancellationToken.ThrowIfCancellationRequested(); - var metricName = scrapeDefinition.PrometheusMetricDefinition.Name; - var resourceType = scrapeDefinition.Resource.ResourceType; - Logger.LogInformation("Scraping {MetricName} for resource type {ResourceType}.", metricName, resourceType); + var metricName = scrapeDefinition.PrometheusMetricDefinition.Name; + var 
resourceType = scrapeDefinition.Resource.ResourceType; + Logger.LogInformation("Scraping {MetricName} for resource type {ResourceType}.", metricName, resourceType); - await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetric(scrapeDefinition), cancellationToken); + await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetric(scrapeDefinition), cancellationToken); + } } await Task.WhenAll(tasks); From a93f065759040c0af93b6fa59756a930875f5cf0 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 15:01:55 -0700 Subject: [PATCH 029/131] add logging --- config/promitor/scraper/ci-runtime.yaml | 7 ++++--- .../Scheduling/ResourcesScrapingJob.cs | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index 1182cf074..932f7d324 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -15,9 +15,10 @@ telemetry: verbosity: trace defaultVerbosity: trace azureMonitor: - metricsBatching: - enabled: true - maxBatchSize: 2 + integration: + metricsBatching: + enabled: true + maxBatchSize: 2 logging: isEnabled: false resourceDiscovery: diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 1d1c9ad6a..f0211f164 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -19,7 +19,6 @@ using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Scraping.Configuration.Model.Metrics; using Promitor.Core.Scraping.Factories; -using Promitor.Core.Scraping.ResourceTypes; using Promitor.Integrations.Azure.Authentication; using Promitor.Integrations.AzureMonitor.Configuration; using Promitor.Integrations.LogAnalytics; @@ -256,6 +255,7 @@ private async Task ScrapeMetrics(IEnumerable(); var batchScrapingEnabled = 
this._azureMonitorIntegrationConfiguration.Value.MetricsBatching?.Enabled ?? false; Logger.LogInformation("Parsed batch config: {Enabled}, {BatchSize}", this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.Enabled, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize); + Logger.LogInformation("Parsed SDK runtime config {Enabled}", this._azureMonitorIntegrationConfiguration.Value.UseAzureMonitorSdk); if (batchScrapingEnabled) { var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize, cancellationToken); From 4342ae2b63755d987ea4f1123a12201743425f84 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 15:20:58 -0700 Subject: [PATCH 030/131] add logging on scraper --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index af33df8b4..69675ce7a 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -111,6 +111,7 @@ protected override async Task> BatchScrapeResourceAsync(strin try { // Query Azure Monitor for metrics + Logger.LogWarning("Querying Azure Monitor for metric {MetricName} with batch size {BatchSize}", metricName, resourceUriList.Count); resourceIdTaggedMeasuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, metricFilter, metricLimit); } catch (MetricInformationNotFoundException metricsNotFoundException) @@ -133,6 +134,7 @@ protected override async Task> BatchScrapeResourceAsync(strin var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) 
batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetrics.ToImmutableList()); scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); + Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); } return scrapeResults; From 6b0dcbb18c35bb4febaeca126afb38949a2167ea Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 15:53:07 -0700 Subject: [PATCH 031/131] add logging on scraper --- .../AzureMonitorQueryClient.cs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 84146fd84..29a2d967f 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -129,7 +129,8 @@ public async Task> BatchQueryMetricAsync( // Get the most recent metric var metricResultsList = await _metricsBatchQueryClient.GetRelevantMetricForResourçes(resourceIds, metricName, metricNamespace, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration); - + _logger.LogWarning("Azure monitor has returned {ResultsCount} results for metric {MetricName}", metricResultsList.Count, metricName); + //TODO: This is potentially a lot of results to process in a single thread. 
Think of ways to utilize additional parallelism return metricResultsList .SelectMany(metricResult => ProcessMetricResult(metricResult, metricName, startQueryingTime, closestAggregationInterval, aggregationType, metricDimensions) @@ -168,7 +169,7 @@ private List ProcessMetricResult(MetricResult metricResult, stri } catch (MissingDimensionException e) { - _logger.LogWarning("{MetricName} has return a time series with empty value for {Dimension} and the measurements will be dropped", metricName, e.DimensionName); + _logger.LogWarning("{MetricName} has returned a time series with empty value for {Dimension} and the measurements will be dropped", metricName, e.DimensionName); _logger.LogDebug("The violating time series has content {Details}", JsonConvert.SerializeObject(e.TimeSeries)); } } From f7730580cd1de74e612046b92fd5ac3db8a8d121 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 16:11:45 -0700 Subject: [PATCH 032/131] add logging on scraper --- src/Promitor.Core.Scraping/Scraper.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index f3f29fec7..d5542c43d 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -98,7 +98,7 @@ public async Task ScrapeAsync(ScrapeDefinition scrapeD } catch (Exception exception) { - Logger.LogCritical(exception, "Failed to scrape resource for metric '{MetricName}'", scrapeDefinition.PrometheusMetricDefinition.Name); + Logger.LogCritical(exception, "Failed to scrape resource for metric '{MetricName}'. 
Details: {Details}", scrapeDefinition.PrometheusMetricDefinition.Name, exception.ToString()); await ReportScrapingOutcomeAsync(scrapeDefinition, isSuccessful: false); } From c3aa159e4e23dcc5f8da1081c06b2da35790cf02 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 14 Aug 2024 17:38:57 -0700 Subject: [PATCH 033/131] log exception details --- .../Scheduling/ResourcesScrapingJob.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index f0211f164..4992b8350 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -299,8 +299,8 @@ private async Task ScrapeMetricBatched(BatchScrapeDefinition Date: Wed, 14 Aug 2024 18:28:26 -0700 Subject: [PATCH 034/131] add more logging on query tasks --- .../AzureMonitorQueryClient.cs | 2 +- .../Extensions/AzureMonitorQueryTasks.cs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 29a2d967f..a2c60c5c9 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -128,7 +128,7 @@ public async Task> BatchQueryMetricAsync( var closestAggregationInterval = DetermineAggregationInterval(metricName, aggregationInterval, metricDefinition.MetricAvailabilities); // Get the most recent metric - var metricResultsList = await _metricsBatchQueryClient.GetRelevantMetricForResourçes(resourceIds, metricName, metricNamespace, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration); + var metricResultsList = await 
_metricsBatchQueryClient.GetRelevantMetricForResourçes(resourceIds, metricName, metricNamespace, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration, _logger); _logger.LogWarning("Azure monitor has returned {ResultsCount} results for metric {MetricName}", metricResultsList.Count, metricName); //TODO: This is potentially a lot of results to process in a single thread. Think of ways to utilize additional parallelism diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index efc7bb774..38ac40c3b 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -5,6 +5,7 @@ using Azure.Core; using Azure.Monitor.Query; using Azure.Monitor.Query.Models; +using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; using Promitor.Core; using Promitor.Integrations.AzureMonitor.Configuration; @@ -51,7 +52,7 @@ public static async Task GetRelevantMetricSingleResource(this Metr } public static async Task> GetRelevantMetricForResourçes(this MetricsClient metricsClient, List resourceIds, string metricName, string metricNamespace, MetricAggregationType metricAggregation, TimeSpan metricInterval, - string metricFilter, List metricDimensions, int? metricLimit, DateTime recordDateTime, IOptions azureMonitorIntegrationConfiguration) + string metricFilter, List metricDimensions, int? metricLimit, DateTime recordDateTime, IOptions azureMonitorIntegrationConfiguration, ILogger logger) { MetricsQueryResourcesOptions queryOptions; var querySizeLimit = metricLimit ?? 
Defaults.MetricDefaults.Limit; @@ -78,9 +79,11 @@ public static async Task> GetRelevantMetricForResourçes(this TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) }; } + logger.LogWarning("Batch query options: {Options}", queryOptions); var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; + logger.LogWarning("Got response"); return metricsQueryResults.Values .Select(result => GetRelevantMetricResultOrThrow(result, metricName)) .ToList(); From 3cc8359b27a9d612c65f297ad73d1b6373a41eae Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 15:50:09 -0700 Subject: [PATCH 035/131] make region configurable --- config/promitor/scraper/ci-runtime.yaml | 1 + .../AzureMonitorQueryClient.cs | 8 +++++--- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 3 +++ .../Extensions/AzureMonitorQueryTasks.cs | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index 932f7d324..12ad9bfb0 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -19,6 +19,7 @@ azureMonitor: metricsBatching: enabled: true maxBatchSize: 2 + azureRegion: europe logging: isEnabled: false resourceDiscovery: diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index a2c60c5c9..961405018 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -61,7 +61,7 @@ public AzureMonitorQueryClient(AzureCloud azureCloud, string tenantId, string su _azureMonitorIntegrationConfiguration = azureMonitorIntegrationConfiguration; _logger = 
loggerFactory.CreateLogger(); _metricsQueryClient = CreateAzureMonitorMetricsClient(azureCloud, tenantId, subscriptionId, azureAuthenticationInfo, metricSinkWriter, azureScrapingSystemMetricsPublisher, azureMonitorLoggingConfiguration); - _metricsBatchQueryClient = CreateAzureMonitorMetricsBatchClient(azureCloud, tenantId, azureAuthenticationInfo, azureMonitorLoggingConfiguration); + _metricsBatchQueryClient = CreateAzureMonitorMetricsBatchClient(azureCloud, tenantId, azureAuthenticationInfo, azureMonitorIntegrationConfiguration, azureMonitorLoggingConfiguration); } /// @@ -274,7 +274,8 @@ private MetricsQueryClient CreateAzureMonitorMetricsClient(AzureCloud azureCloud /// /// Creates authenticated client for metrics batch queries /// - private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud, string tenantId, AzureAuthenticationInfo azureAuthenticationInfo, IOptions azureMonitorLoggingConfiguration) { + private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud, string tenantId, AzureAuthenticationInfo azureAuthenticationInfo, IOptions azureMonitorIntegrationConfiguration, IOptions azureMonitorLoggingConfiguration) { + var azureRegion = azureMonitorIntegrationConfiguration.Value.MetricsBatching.AzureRegion; var metricsClientOptions = new MetricsClientOptions{ Audience = azureCloud.DetermineMetricsClientBatchQueryAudience(), Retry = @@ -293,7 +294,8 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud using AzureEventSourceListener traceListener = AzureEventSourceListener.CreateTraceLogger(EventLevel.Informational); metricsClientOptions.Diagnostics.IsLoggingEnabled = true; } - return new MetricsClient(new Uri(azureCloud.DetermineMetricsClientBatchQueryAudience().ToString()), tokenCredential, metricsClientOptions); + _logger.LogWarning("Using batch scraping API URL: {URL}", $"{azureRegion}.{azureCloud.DetermineMetricsClientBatchQueryAudience()}"); + return new MetricsClient(new 
Uri($"{azureRegion}.{azureCloud.DetermineMetricsClientBatchQueryAudience()}"), tokenCredential, metricsClientOptions); } } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index 7315a04b6..4c4deb40a 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -1,8 +1,11 @@ +using Azure.Core; + namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { public bool Enabled { get; set; } public int MaxBatchSize { get; set; } + public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 38ac40c3b..35430f754 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -79,7 +79,7 @@ public static async Task> GetRelevantMetricForResourçes(this TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) }; } - logger.LogWarning("Batch query options: {Options}", queryOptions); + logger.LogWarning("Batch query options: {Options}", queryOptions.ToString()); var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; From 7a8fc274ac7219a8553162d426d767224026ea8e Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 16:07:32 -0700 Subject: [PATCH 
036/131] try fixing regional metrics URL --- .../AzureMonitorQueryClient.cs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 961405018..31c0cc49b 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -294,8 +294,21 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud using AzureEventSourceListener traceListener = AzureEventSourceListener.CreateTraceLogger(EventLevel.Informational); metricsClientOptions.Diagnostics.IsLoggingEnabled = true; } - _logger.LogWarning("Using batch scraping API URL: {URL}", $"{azureRegion}.{azureCloud.DetermineMetricsClientBatchQueryAudience()}"); - return new MetricsClient(new Uri($"{azureRegion}.{azureCloud.DetermineMetricsClientBatchQueryAudience()}"), tokenCredential, metricsClientOptions); + _logger.LogWarning("Using batch scraping API URL: {URL}", InsertRegionIntoUrl(azureRegion, azureCloud.DetermineMetricsClientBatchQueryAudience().ToString())); + return new MetricsClient(new Uri(InsertRegionIntoUrl(azureRegion, azureCloud.DetermineMetricsClientBatchQueryAudience().ToString())), tokenCredential, metricsClientOptions); + } + + private static string InsertRegionIntoUrl(string region, string baseUrl) + { + // Find the position where ".metrics" starts in the URL + int metricsIndex = baseUrl.IndexOf(".metrics"); + + // Split the base URL into two parts: before and after the ".metrics" + string beforeMetrics = baseUrl.Substring(0, metricsIndex); + string afterMetrics = baseUrl.Substring(metricsIndex); + + // Concatenate the region between the two parts + return $"{beforeMetrics}.{region}{afterMetrics}"; } } } \ No newline at end of file From bb71f0d69bf4869d6bb75518d6bbe377eeb80f4e Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 
2024 22:12:02 -0700 Subject: [PATCH 037/131] fix batch API URL formatting --- .../AzureMonitorQueryClient.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 31c0cc49b..ed1db22d4 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -298,17 +298,17 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud return new MetricsClient(new Uri(InsertRegionIntoUrl(azureRegion, azureCloud.DetermineMetricsClientBatchQueryAudience().ToString())), tokenCredential, metricsClientOptions); } - private static string InsertRegionIntoUrl(string region, string baseUrl) + public static string InsertRegionIntoUrl(string region, string baseUrl) { // Find the position where ".metrics" starts in the URL - int metricsIndex = baseUrl.IndexOf(".metrics"); + int metricsIndex = baseUrl.IndexOf("metrics"); // Split the base URL into two parts: before and after the ".metrics" string beforeMetrics = baseUrl.Substring(0, metricsIndex); string afterMetrics = baseUrl.Substring(metricsIndex); // Concatenate the region between the two parts - return $"{beforeMetrics}.{region}{afterMetrics}"; + return $"{beforeMetrics}{region}.{afterMetrics}"; } } } \ No newline at end of file From c79750d7a7691992147e8f200ccc74b936ae8bf9 Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 22:20:23 -0700 Subject: [PATCH 038/131] fix batch API URL formatting --- config/promitor/scraper/ci-runtime.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index 12ad9bfb0..c51ecf74b 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -19,7 +19,7 @@ azureMonitor: metricsBatching: enabled: 
true maxBatchSize: 2 - azureRegion: europe + azureRegion: westeurope logging: isEnabled: false resourceDiscovery: From 5a1fc335e6fab6d96801e2f1f47d966d7eefce1e Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 22:35:17 -0700 Subject: [PATCH 039/131] debug query range --- .../Extensions/AzureMonitorQueryTasks.cs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 35430f754..6e57f31b9 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -56,7 +56,8 @@ public static async Task> GetRelevantMetricForResourçes(this { MetricsQueryResourcesOptions queryOptions; var querySizeLimit = metricLimit ?? Defaults.MetricDefaults.Limit; - var historyStartingFromInHours = azureMonitorIntegrationConfiguration.Value.History.StartingFromInHours; + //var historyStartingFromInHours = azureMonitorIntegrationConfiguration.Value.History.StartingFromInHours; + var historyStartingFromInHours = 2; var filter = BuildFilter(metricDimensions, metricFilter); List resourceIdentifiers = resourceIds.Select(id => new ResourceIdentifier(id)).ToList(); @@ -79,7 +80,7 @@ public static async Task> GetRelevantMetricForResourçes(this TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) }; } - logger.LogWarning("Batch query options: {Options}", queryOptions.ToString()); + logger.LogWarning("Batch query range: {Range}", queryOptions.TimeRange); var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; From be7b18b0b5540b93eaf27cd04deee78cba6e61a9 Mon Sep 17 00:00:00 2001 From: xchen 
Date: Fri, 16 Aug 2024 22:53:14 -0700 Subject: [PATCH 040/131] use different time range instantiation --- .../Extensions/AzureMonitorQueryTasks.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 6e57f31b9..06bd793d7 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -68,7 +68,7 @@ public static async Task> GetRelevantMetricForResourçes(this Granularity = metricInterval, Filter = filter, Size = querySizeLimit, - TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) + TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } else @@ -77,7 +77,7 @@ public static async Task> GetRelevantMetricForResourçes(this Aggregations = { metricAggregation.ToString() }, Granularity = metricInterval, Size = querySizeLimit, - TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) + TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } logger.LogWarning("Batch query range: {Range}", queryOptions.TimeRange); From a58318fb4af666c422ff116ed08bc318918d2141 Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 23:06:32 -0700 Subject: [PATCH 041/131] use different time range instantiation --- .../AzureMonitorQueryClient.cs | 2 +- .../Extensions/AzureMonitorQueryTasks.cs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index ed1db22d4..a27bfbc39 100644 --- 
a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -128,7 +128,7 @@ public async Task> BatchQueryMetricAsync( var closestAggregationInterval = DetermineAggregationInterval(metricName, aggregationInterval, metricDefinition.MetricAvailabilities); // Get the most recent metric - var metricResultsList = await _metricsBatchQueryClient.GetRelevantMetricForResourçes(resourceIds, metricName, metricNamespace, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration, _logger); + var metricResultsList = await _metricsBatchQueryClient.GetRelevantMetricForResources(resourceIds, metricName, metricNamespace, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration, _logger); _logger.LogWarning("Azure monitor has returned {ResultsCount} results for metric {MetricName}", metricResultsList.Count, metricName); //TODO: This is potentially a lot of results to process in a single thread. 
Think of ways to utilize additional parallelism diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 06bd793d7..dbf3c77bb 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -51,7 +51,7 @@ public static async Task GetRelevantMetricSingleResource(this Metr return GetRelevantMetricResultOrThrow(metricsQueryResponse.Value, metricName); } - public static async Task> GetRelevantMetricForResourçes(this MetricsClient metricsClient, List resourceIds, string metricName, string metricNamespace, MetricAggregationType metricAggregation, TimeSpan metricInterval, + public static async Task> GetRelevantMetricForResources(this MetricsClient metricsClient, List resourceIds, string metricName, string metricNamespace, MetricAggregationType metricAggregation, TimeSpan metricInterval, string metricFilter, List metricDimensions, int? 
metricLimit, DateTime recordDateTime, IOptions azureMonitorIntegrationConfiguration, ILogger logger) { MetricsQueryResourcesOptions queryOptions; @@ -80,7 +80,7 @@ public static async Task> GetRelevantMetricForResourçes(this TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } - logger.LogWarning("Batch query range: {Range}", queryOptions.TimeRange); + logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}", queryOptions.TimeRange, querySizeLimit, metricInterval, metricAggregation); var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; From 187459cfc366c3e7c7ca63620e77c59e61bd3703 Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 23:25:30 -0700 Subject: [PATCH 042/131] lower case aggregations --- .../Extensions/AzureMonitorQueryTasks.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index dbf3c77bb..394f038e3 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -64,7 +64,7 @@ public static async Task> GetRelevantMetricForResources(this if (!string.IsNullOrEmpty(filter)) { queryOptions = new MetricsQueryResourcesOptions { - Aggregations = { metricAggregation.ToString() }, + Aggregations = { metricAggregation.ToString().ToLower() }, Granularity = metricInterval, Filter = filter, Size = querySizeLimit, @@ -74,13 +74,13 @@ public static async Task> GetRelevantMetricForResources(this else { queryOptions = new MetricsQueryResourcesOptions { - Aggregations = { metricAggregation.ToString() }, + Aggregations = { 
metricAggregation.ToString().ToLower() }, Granularity = metricInterval, Size = querySizeLimit, TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } - logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}", queryOptions.TimeRange, querySizeLimit, metricInterval, metricAggregation); + logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}", queryOptions.TimeRange, querySizeLimit, metricInterval, queryOptions.Aggregations); var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; From fdabad9029841039adc0dd7972d0f7a5c38dbece Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 23:32:10 -0700 Subject: [PATCH 043/131] do not use size when filter not present --- .../Extensions/AzureMonitorQueryTasks.cs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 394f038e3..a96e2513b 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -76,7 +76,6 @@ public static async Task> GetRelevantMetricForResources(this queryOptions = new MetricsQueryResourcesOptions { Aggregations = { metricAggregation.ToString().ToLower() }, Granularity = metricInterval, - Size = querySizeLimit, TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } From b2cfd7959ffc2680bf3d7b012f1d5a89647534e2 Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 23:39:41 -0700 Subject: [PATCH 044/131] log filter --- .../Extensions/AzureMonitorQueryTasks.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index a96e2513b..abbbba725 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -79,7 +79,7 @@ public static async Task> GetRelevantMetricForResources(this TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } - logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}", queryOptions.TimeRange, querySizeLimit, metricInterval, queryOptions.Aggregations); + logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}, filter: {Filter}", queryOptions.TimeRange, queryOptions.Size, queryOptions.Granularity, queryOptions.Aggregations, queryOptions.Filter); var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; From dd9d248e5ff5599d9c6b5c741dc11219795a571f Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 16 Aug 2024 23:46:53 -0700 Subject: [PATCH 045/131] log more query params --- .../Extensions/AzureMonitorQueryTasks.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index abbbba725..d09cec70a 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -80,7 +80,8 @@ public static async Task> GetRelevantMetricForResources(this }; } logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}, 
filter: {Filter}", queryOptions.TimeRange, queryOptions.Size, queryOptions.Granularity, queryOptions.Aggregations, queryOptions.Filter); - + logger.LogWarning("Resource IDs: {IDs}", resourceIds); + var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; logger.LogWarning("Got response"); From a90bf15d415fbd8f810de8db46af2197c2ce4e58 Mon Sep 17 00:00:00 2001 From: xchen Date: Sat, 17 Aug 2024 00:03:59 -0700 Subject: [PATCH 046/131] log outgoing requests --- .../AzureMonitorQueryClient.cs | 3 +- .../LogOutgoingRequestsPolicy.cs | 34 +++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index a27bfbc39..1fc1d8d83 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -23,6 +23,7 @@ using Azure.Core.Diagnostics; using System.Diagnostics.Tracing; using Promitor.Integrations.AzureMonitor.Extensions; +using System.Globalization; namespace Promitor.Integrations.AzureMonitor { @@ -287,7 +288,7 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud } }; // retry policy as suggested in the documentation: https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/migrate-to-batch-api?tabs=individual-response#529-throttling-errors var tokenCredential = AzureAuthenticationFactory.GetTokenCredential(nameof(azureCloud), tenantId, azureAuthenticationInfo, azureCloud.GetAzureAuthorityHost()); - + metricsClientOptions.AddPolicy(new LogOutgoingRequestsPolicy(_logger), HttpPipelinePosition.BeforeTransport); var azureMonitorLogging = 
azureMonitorLoggingConfiguration.Value; if (azureMonitorLogging.IsEnabled) { diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs new file mode 100644 index 000000000..978c9bc67 --- /dev/null +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs @@ -0,0 +1,34 @@ +using System; +using System.Text; +using System.Threading.Tasks; +using Azure.Core; +using Azure.Core.Pipeline; +using GuardNet; +using Microsoft.Extensions.Logging; +using Promitor.Core.Metrics.Sinks; +using Promitor.Integrations.Azure.Authentication; +using Version = Promitor.Core.Version; + +namespace Promitor.Integrations.AzureMonitor.HttpPipelinePolicies{ + public class LogOutgoingRequestsPolicy : HttpPipelinePolicy + { + private readonly ILogger _logger; + public LogOutgoingRequestsPolicy(ILogger logger) + { + Guard.NotNull(logger, nameof(logger)); + _logger = logger; + } + + public override async ValueTask ProcessAsync(HttpMessage message, ReadOnlyMemory pipeline) + { + _logger.LogWarning("URI: {uri}", message.Request.Uri.ToString()); + await ProcessNextAsync(message, pipeline); + } + + public override void Process(HttpMessage message, ReadOnlyMemory pipeline) + { + throw new NotSupportedException("Synchronous HTTP request path is not supported"); + } + } +} + From f2d51f2b53460235166718bece6f28f663eb0a9e Mon Sep 17 00:00:00 2001 From: xchen Date: Sat, 17 Aug 2024 00:16:17 -0700 Subject: [PATCH 047/131] try without time range first, because Azure SDK is buggy :( --- .../Extensions/AzureMonitorQueryTasks.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index d09cec70a..e90bc0b0e 100644 --- 
a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -68,7 +68,7 @@ public static async Task> GetRelevantMetricForResources(this Granularity = metricInterval, Filter = filter, Size = querySizeLimit, - TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) + //TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } else @@ -76,7 +76,7 @@ public static async Task> GetRelevantMetricForResources(this queryOptions = new MetricsQueryResourcesOptions { Aggregations = { metricAggregation.ToString().ToLower() }, Granularity = metricInterval, - TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) + //TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) }; } logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}, filter: {Filter}", queryOptions.TimeRange, queryOptions.Size, queryOptions.Granularity, queryOptions.Aggregations, queryOptions.Filter); From 6406cbedafd0437221fbdde17aecbe79bcaa39c3 Mon Sep 17 00:00:00 2001 From: xchen Date: Sat, 17 Aug 2024 00:25:31 -0700 Subject: [PATCH 048/131] log ID response --- .../Extensions/MetricResultExtension.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs index 9ce7c3383..8dc4f8ba9 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -15,7 +15,7 @@ public static string ParseResourceIdFromResultId(this MetricResult metricResult) Match match = resourceIdRegex.Match(metricResult.Id); if (!match.Success || string.IsNullOrEmpty(match.Groups[1].Value)) { - throw new
InvalidOperationException("The expected resource ID pattern was not found in the input string."); + throw new InvalidOperationException($"The expected resource ID pattern was not found in the input string {metricResult.Id}"); } string resourceId = match.Groups[1].Value; From be8638c47a3bd6cc03149732d17d47bf8a32fe44 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 19 Aug 2024 13:10:53 -0700 Subject: [PATCH 049/131] try new regex for resource ID parsing --- .../Extensions/MetricResultExtension.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs index 8dc4f8ba9..bb5c16e77 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -7,7 +7,7 @@ namespace Promitor.Integrations.AzureMonitor.Extensions public static class MetricResultExtension { // hacky to to get resource ID since it's not available directly through the SDK model - static string resourceIdPattern = @"^(/subscriptions/[^/]+/resourceGroups/[^/]+/providers/[^/]+/[^/]+/[^/]+)"; + static string resourceIdPattern = @"^(subscriptions\/[^\/]+\/resourceGroups\/[^\/]+\/providers\/[^\/]+\/[^\/]+\/[^\/]+)"; static Regex resourceIdRegex = new Regex(resourceIdPattern, RegexOptions.Compiled); public static string ParseResourceIdFromResultId(this MetricResult metricResult) From 3e8e0954169a743e7498a90557ce32be60de9a96 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 19 Aug 2024 14:22:26 -0700 Subject: [PATCH 050/131] process metrics results as IGroup --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 69675ce7a..7d68af7a5 100644 --- 
a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -127,12 +127,12 @@ protected override async Task> BatchScrapeResourceAsync(strin var scrapeResults = new List(); // group based on resource, then to enrichment per group var groupedMeasuredMetrics = resourceIdTaggedMeasuredMetrics.GroupBy(measureMetric => measureMetric.ResourceId); - foreach (List resourceMetrics in groupedMeasuredMetrics) + foreach (IGrouping resourceMetricsGroup in groupedMeasuredMetrics) { - var resourceId = resourceMetrics[0].ResourceId; + var resourceId = resourceMetricsGroup.Key; _resourceDefinitions.TryGetValue(resourceId, out IAzureResourceDefinition resourceDefinition); var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); - var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetrics.ToImmutableList()); + var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetricsGroup.ToImmutableList()); scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); } From 694a856c96f9b8b047e23366604882c4de48be6b Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 19 Aug 2024 14:39:51 -0700 Subject: [PATCH 051/131] log resource definition cache --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 7d68af7a5..17489c885 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ 
b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -99,6 +99,7 @@ protected override async Task> BatchScrapeResourceAsync(strin // cache resource info if (!_resourceDefinitions.ContainsKey(resourceUri)) { + Logger.LogWarning("Caching resource definition {Definition} for {ResourceId}", scrapeDefinition.Resource, resourceUri); _resourceDefinitions.TryAdd(resourceUri, scrapeDefinition.Resource); } } From 63d9a03b98bfe09c4c2cfe819b5ce86b5abff346 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 19 Aug 2024 14:50:11 -0700 Subject: [PATCH 052/131] log resource definition cache --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 17489c885..1655d7c18 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -99,7 +99,7 @@ protected override async Task> BatchScrapeResourceAsync(strin // cache resource info if (!_resourceDefinitions.ContainsKey(resourceUri)) { - Logger.LogWarning("Caching resource definition {Definition} for {ResourceId}", scrapeDefinition.Resource, resourceUri); + Logger.LogWarning("Caching resource definition {Definition} for {ResourceId}", scrapeDefinition.Resource.ResourceGroupName, resourceUri); _resourceDefinitions.TryAdd(resourceUri, scrapeDefinition.Resource); } } From 71fb195b94ddc4642cdbb868872afea83501cf90 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 19 Aug 2024 15:44:58 -0700 Subject: [PATCH 053/131] log individual resource definitions --- .../Scheduling/ResourcesScrapingJob.cs | 5 +++++ .../AzureMonitorScraper.cs | 2 +- .../AzureResourceDefinitionBatchingTests.cs | 20 +++++++++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 4992b8350..da94246e3 
100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -263,6 +263,11 @@ private async Task ScrapeMetrics(IEnumerable ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } } else { diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 1655d7c18..6c58b3f45 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -99,7 +99,7 @@ protected override async Task> BatchScrapeResourceAsync(strin // cache resource info if (!_resourceDefinitions.ContainsKey(resourceUri)) { - Logger.LogWarning("Caching resource definition {Definition} for {ResourceId}", scrapeDefinition.Resource.ResourceGroupName, resourceUri); + Logger.LogWarning("Caching resource group {Group} for {ResourceId}", scrapeDefinition.Resource.ResourceGroupName, resourceUri); _resourceDefinitions.TryAdd(resourceUri, scrapeDefinition.Resource); } } diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs index 3529cb3ae..3bc779c66 100644 --- a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs +++ b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -114,6 +114,26 @@ public void MixedBatchShouldSplitAccordingToConfiguredBatchSize() Assert.Equal(6, groupedScrapeDefinitions.Count); Assert.Equal(250, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); } + + [Fact] + public void BatchConstructionShouldBeAgnosticToResourceGroup() + { + var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var scrapeDefinitions = BuildScrapeDefinitionBatch( + 
azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + ); + var differentScrapeDefinitions = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: "group2", 10 + ); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: _batchSize, CancellationToken.None); + // expect one single batch of 20, since resource group should not affect batch grouping + Assert.Single(groupedScrapeDefinitions); + Assert.Equal(20, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); + } private static List> BuildScrapeDefinitionBatch( AzureMetricConfiguration azureMetricConfiguration, From f02eaaa8fb713486d4d622480fed367e6e49658f Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 19 Aug 2024 16:16:22 -0700 Subject: [PATCH 054/131] fill out cached resource definitions --- .../Scheduling/ResourcesScrapingJob.cs | 4 ++-- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 11 +++++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index da94246e3..f97ea6d67 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -264,9 +264,9 @@ private async Task ScrapeMetrics(IEnumerable definition in batchScrapeDefinition.ScrapeDefinitions) { - Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}", 
definition.ResourceName, definition.ResourceGroupName); + Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}", definition.Azu, definition.ResourceGroupName); } await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 6c58b3f45..87c20d5db 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -99,8 +99,15 @@ protected override async Task> BatchScrapeResourceAsync(strin // cache resource info if (!_resourceDefinitions.ContainsKey(resourceUri)) { - Logger.LogWarning("Caching resource group {Group} for {ResourceId}", scrapeDefinition.Resource.ResourceGroupName, resourceUri); - _resourceDefinitions.TryAdd(resourceUri, scrapeDefinition.Resource); + var resourceDefinitionToCache = new AzureResourceDefinition + ( + resourceType: scrapeDefinition.Resource.ResourceType, + resourceGroupName: scrapeDefinition.ResourceGroupName, + subscriptionId: scrapeDefinition.SubscriptionId, + resourceName: scrapeDefinition.Resource.ResourceName + ); // the resource definition attached is missing some attributes, filling them in here + Logger.LogWarning("Caching resource group {Group} for {ResourceId}", resourceDefinitionToCache.ResourceGroupName, resourceUri); + _resourceDefinitions.TryAdd(resourceUri, resourceDefinitionToCache); } } From 6a0d2bf7bf794b5cabbd273f22a7997cf11e9dc6 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 19 Aug 2024 16:33:34 -0700 Subject: [PATCH 055/131] fix fix --- src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index f97ea6d67..dc98748d2 100644 --- 
a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -266,7 +266,7 @@ private async Task ScrapeMetrics(IEnumerable definition in batchScrapeDefinition.ScrapeDefinitions) { - Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}", definition.Azu, definition.ResourceGroupName); + Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}", definition.Resource.ResourceName, definition.ResourceGroupName); } await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } From 78bf03c6d825096b262ee8389edacbb7ae2987f6 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 15:11:58 -0700 Subject: [PATCH 056/131] correct aggregation interval processing --- .../Model/Metrics/ScrapeDefinitionBatch.cs | 6 ++--- src/Promitor.Core.Scraping/Scraper.cs | 4 +-- .../AzureResourceDefinitionBatchingTests.cs | 26 +++++++++++++++++++ 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index 5ee57ce22..74281800b 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -65,8 +65,6 @@ public ScrapeDefinitionBatchProperties( /// public ResourceType ResourceType { get; } - public TimeSpan AggregationInterval{ get; } - public TimeSpan? 
GetAggregationInterval() { return AzureMetricConfiguration?.Aggregation?.Interval; @@ -83,7 +81,7 @@ public override int GetHashCode() /// private string BuildBatchHashKey() { - return string.Join("_", new List {AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), AggregationInterval.ToString()}); + return string.Join("_", new List {AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), GetAggregationInterval().ToString()}); } /// @@ -95,7 +93,7 @@ public bool Equals(ScrapeDefinitionBatchProperties obj) return false; ScrapeDefinitionBatchProperties other = obj; - return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && AggregationInterval.Equals(other.AggregationInterval); + return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && GetAggregationInterval().Equals(other.GetAggregationInterval()); } } diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index d5542c43d..f054dd801 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -112,7 +112,7 @@ public async Task BatchScrapeAsync(BatchScrapeDefinition(_azureMetricConfigurationBase); + azureMetricConfiguration5mInterval.Aggregation.Interval = TimeSpan.FromMinutes(5); + var azureMetricConfiguration2mInterval = _mapper.Map(_azureMetricConfigurationBase); + azureMetricConfiguration5mInterval.Aggregation.Interval = TimeSpan.FromMinutes(2); + var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var scrapeDefinitions5m = BuildScrapeDefinitionBatch( + azureMetricConfiguration: 
azureMetricConfiguration5mInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + ); + var differentScrapeDefinitions2m = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration2mInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + ); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: _batchSize, CancellationToken.None); + // expect two batch of 10 each + Assert.Equal(2, groupedScrapeDefinitions.Count); + Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); + Assert.Equal(10, groupedScrapeDefinitions[1].ScrapeDefinitions.Count); + } + + [Fact] public void MixedBatchShouldSplitAccordingToConfiguredBatchSize() { From 7e583a913df6f882d8c67d7d5f5246ad97a7bf20 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 15:41:33 -0700 Subject: [PATCH 057/131] try range query again --- .../Extensions/AzureMonitorQueryTasks.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index e90bc0b0e..684d63bab 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -68,7 +68,7 @@ public static async Task> GetRelevantMetricForResources(this Granularity = metricInterval, Filter = filter, Size = querySizeLimit, - //TimeRange= new 
QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) + TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) }; } else @@ -76,7 +76,7 @@ public static async Task> GetRelevantMetricForResources(this queryOptions = new MetricsQueryResourcesOptions { Aggregations = { metricAggregation.ToString().ToLower() }, Granularity = metricInterval, - //TimeRange= new QueryTimeRange(TimeSpan.FromHours(historyStartingFromInHours)) + TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new DateTimeOffset(recordDateTime)) }; } logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}, filter: {Filter}", queryOptions.TimeRange, queryOptions.Size, queryOptions.Granularity, queryOptions.Aggregations, queryOptions.Filter); From 802a9d2d99a5383fb7944fb3f3b769e73fa4cd3b Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 17:04:46 -0700 Subject: [PATCH 058/131] try modify date range on outgoing requests --- .../AzureMonitorQueryClient.cs | 2 +- .../Extensions/AzureMonitorQueryTasks.cs | 2 +- .../LogOutgoingRequestsPolicy.cs | 29 +++++++++++++++---- 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 1fc1d8d83..b343696a4 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -288,7 +288,7 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud } }; // retry policy as suggested in the documentation: https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/migrate-to-batch-api?tabs=individual-response#529-throttling-errors var tokenCredential = 
AzureAuthenticationFactory.GetTokenCredential(nameof(azureCloud), tenantId, azureAuthenticationInfo, azureCloud.GetAzureAuthorityHost()); - metricsClientOptions.AddPolicy(new LogOutgoingRequestsPolicy(_logger), HttpPipelinePosition.BeforeTransport); + metricsClientOptions.AddPolicy(new ModifyOutgoingAzureMonitorRequestsPolicy(_logger), HttpPipelinePosition.BeforeTransport); var azureMonitorLogging = azureMonitorLoggingConfiguration.Value; if (azureMonitorLogging.IsEnabled) { diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 684d63bab..930253f26 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -90,7 +90,7 @@ public static async Task> GetRelevantMetricForResources(this .ToList(); } - private static string BuildFilter(List metricDimensions, string metricFilter) + private static string BuildFilter(List metricDimensions, string metricFilter) { var filterDictionary = new Dictionary(); metricDimensions.ForEach(metricDimension => filterDictionary.Add(metricDimension, "'*'")); diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs index 978c9bc67..f09d5800e 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs @@ -1,19 +1,18 @@ using System; +using System.Collections.Generic; +using System.Globalization; using System.Text; using System.Threading.Tasks; using Azure.Core; using Azure.Core.Pipeline; using GuardNet; using Microsoft.Extensions.Logging; -using Promitor.Core.Metrics.Sinks; -using Promitor.Integrations.Azure.Authentication; -using Version = 
Promitor.Core.Version; namespace Promitor.Integrations.AzureMonitor.HttpPipelinePolicies{ - public class LogOutgoingRequestsPolicy : HttpPipelinePolicy + public class ModifyOutgoingAzureMonitorRequestsPolicy : HttpPipelinePolicy { private readonly ILogger _logger; - public LogOutgoingRequestsPolicy(ILogger logger) + public ModifyOutgoingAzureMonitorRequestsPolicy(ILogger logger) { Guard.NotNull(logger, nameof(logger)); _logger = logger; @@ -21,7 +20,8 @@ public LogOutgoingRequestsPolicy(ILogger logger) public override async ValueTask ProcessAsync(HttpMessage message, ReadOnlyMemory pipeline) { - _logger.LogWarning("URI: {uri}", message.Request.Uri.ToString()); + ModifyDateTimeParam(["starttime", "endtime"], message); + _logger.LogWarning("Modified URI: {uri}", message.Request.Uri.ToString()); await ProcessNextAsync(message, pipeline); } @@ -29,6 +29,23 @@ public override void Process(HttpMessage message, ReadOnlyMemory paramNames, HttpMessage message) + { + // Modify the request URL by updating or adding a query parameter + var uriBuilder = new UriBuilder(message.Request.Uri.ToString()); + var query = System.Web.HttpUtility.ParseQueryString(uriBuilder.Query); + foreach (var param in paramNames) + { + if (DateTime.TryParseExact(query[param], "MM/dd/yyyy HH:mm:ss", CultureInfo.InvariantCulture, DateTimeStyles.None, out DateTime dateTime)) + { + // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") + query[param] = dateTime.ToString("o", CultureInfo.InvariantCulture); + // Update the message with the modified URI + } + } + message.Request.Uri.Query = query.ToString(); + } } } From b26c9aab02e7ca2b5de0527fd5b0132c39bc069a Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 17:16:23 -0700 Subject: [PATCH 059/131] try modify date range on outgoing requests --- .../HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs index f09d5800e..7109bd696 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs @@ -30,7 +30,7 @@ public override void Process(HttpMessage message, ReadOnlyMemory paramNames, HttpMessage message) + private void ModifyDateTimeParam(List paramNames, HttpMessage message) { // Modify the request URL by updating or adding a query parameter var uriBuilder = new UriBuilder(message.Request.Uri.ToString()); @@ -41,6 +41,7 @@ private static void ModifyDateTimeParam(List paramNames, HttpMessage mes { // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") query[param] = dateTime.ToString("o", CultureInfo.InvariantCulture); + _logger.LogWarning("Modified URI param {param} to be {value}", param, message.Request.Uri.ToString()); // Update the message with the modified URI } } From e85c2555cc7ec846cdabadb90d0f1028a3e2ddc5 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 17:54:15 -0700 Subject: [PATCH 060/131] try modify date range on outgoing requests --- .../HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs index 7109bd696..fd8de4c3a 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs @@ -37,11 +37,13 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) var query = System.Web.HttpUtility.ParseQueryString(uriBuilder.Query); foreach 
(var param in paramNames) { - if (DateTime.TryParseExact(query[param], "MM/dd/yyyy HH:mm:ss", CultureInfo.InvariantCulture, DateTimeStyles.None, out DateTime dateTime)) + _logger.LogWarning("Original URI param {param} is {value}", param, query[param]); + + if (DateTimeOffset.TryParseExact(query[param], "MM/dd/yyyy HH:mm:ss zzz", CultureInfo.InvariantCulture, DateTimeStyles.None, out DateTimeOffset dateTime)) { // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") query[param] = dateTime.ToString("o", CultureInfo.InvariantCulture); - _logger.LogWarning("Modified URI param {param} to be {value}", param, message.Request.Uri.ToString()); + _logger.LogWarning("Modified URI param {param} to be {value}", param, query[param]); // Update the message with the modified URI } } From 0dba9648b4eb280085dd9e046c88df8a1b3de961 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 18:12:53 -0700 Subject: [PATCH 061/131] try modify date range on outgoing requests --- .../HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs index fd8de4c3a..f11b0f115 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs @@ -42,7 +42,7 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) if (DateTimeOffset.TryParseExact(query[param], "MM/dd/yyyy HH:mm:ss zzz", CultureInfo.InvariantCulture, DateTimeStyles.None, out DateTimeOffset dateTime)) { // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") - query[param] = dateTime.ToString("o", CultureInfo.InvariantCulture); + query[param] = dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ"); _logger.LogWarning("Modified URI param {param} to 
be {value}", param, query[param]); // Update the message with the modified URI } From 604ad7666a88334612cb6284bb8d24c1a9d2eecf Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 18:24:17 -0700 Subject: [PATCH 062/131] run single resource scraping for comparison --- config/promitor/scraper/ci-runtime.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index c51ecf74b..b8eefbcd5 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -15,11 +15,6 @@ telemetry: verbosity: trace defaultVerbosity: trace azureMonitor: - integration: - metricsBatching: - enabled: true - maxBatchSize: 2 - azureRegion: westeurope logging: isEnabled: false resourceDiscovery: From 5b0b4cdcb3cfd51f2d375a273bde4ecc57053835 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 9 Sep 2024 22:53:03 -0700 Subject: [PATCH 063/131] run single resource scraping for comparison --- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index 4c4deb40a..b451c1b7b 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -6,6 +6,6 @@ public class AzureMonitorMetricBatchScrapeConfig { public bool Enabled { get; set; } public int MaxBatchSize { get; set; } - public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region + public string AzureRegion { get; set; } = "eastus"; // Batch scrape endpoints are deployed by region } } \ No newline at end of file From cb1b6963ace97c87c6756170aa33d06f3004aff7 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 10 Sep 
2024 14:23:41 -0700 Subject: [PATCH 064/131] go back to batch scraping CI --- config/promitor/scraper/ci-runtime.yaml | 5 +++++ .../Collectors/AzureScrapingSystemMetricsPublisher.cs | 11 +++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index b8eefbcd5..c51ecf74b 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -15,6 +15,11 @@ telemetry: verbosity: trace defaultVerbosity: trace azureMonitor: + integration: + metricsBatching: + enabled: true + maxBatchSize: 2 + azureRegion: westeurope logging: isEnabled: false resourceDiscovery: diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs index 0fbe430ce..c68fb33c5 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs @@ -51,11 +51,18 @@ public async Task WriteGaugeMeasurementAsync(string name, string description, do public async Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels) { - throw new System.NotImplementedException(); + var enableMetricTimestamps = _prometheusConfiguration.CurrentValue.EnableMetricTimestamps; + + var metricsDeclaration = _metricsDeclarationProvider.Get(applyDefaults: true); + labels.TryAdd("tenant_id", metricsDeclaration.AzureMetadata.TenantId); + + var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); + + await _systemMetricsPublisher.WriteGaugeMeasurementAsync(name, description, value, orderedLabels, enableMetricTimestamps); } public async Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, 
bool includeTimestamp) { - throw new System.NotImplementedException(); + await _systemMetricsPublisher.WriteGaugeMeasurementAsync(name, description, value, labels, includeTimestamp); } } } From 834819d6267f3d793b0f9e58603fa7bc37642657 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 10 Sep 2024 15:34:13 -0700 Subject: [PATCH 065/131] create GitHub Action to build image under personal account(will revert) --- .github/workflows/templates-build-push-image.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index 60a8380f8..16f5d8440 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -1,5 +1,6 @@ +name: Build and Push (Linux) on: - workflow_call: + workflow_dispatch: inputs: image_name: required: true @@ -15,6 +16,8 @@ jobs: linux: name: Build & Push (Linux) runs-on: ubuntu-latest + permissions: + packages: write steps: - name: Checkout Code uses: actions/checkout@v4 @@ -40,7 +43,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: tomkerkhove + username: hkfgo password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -49,5 +52,5 @@ jobs: build-args: VERSION="${{ env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} - push: true + tags: ghcr.io/hkfgo/${{ env.image_commit_uri }},ghcr.io/hkfgo/${{ env.image_latest_uri }} + push: true \ No newline at end of file From a3410b83af1d659e3163fc4e67f8f0044545b9a0 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 11 Sep 2024 21:13:35 -0700 Subject: [PATCH 066/131] implement LogAnalytics batch scraping by composing single-resource scrape tasks --- src/Promitor.Core.Scraping/LogAnalyticsScraper.cs | 11 +++++++++-- ...cs => ModifyOutgoingAzureMonitorRequestsPolicy.cs} | 0 2 files 
changed, 9 insertions(+), 2 deletions(-) rename src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/{LogOutgoingRequestsPolicy.cs => ModifyOutgoingAzureMonitorRequestsPolicy.cs} (100%) diff --git a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs index fe8d8d1a9..b0f117b5a 100644 --- a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs +++ b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs @@ -1,7 +1,10 @@ using System; using System.Collections.Generic; +using System.Linq; +using System.Net; using System.Threading.Tasks; using GuardNet; +using Microsoft.VisualBasic; using Promitor.Core.Contracts; using Promitor.Core.Contracts.ResourceTypes; using Promitor.Core.Metrics; @@ -50,9 +53,13 @@ private Dictionary DetermineMetricLabels(LogAnalyticsResourceDef return new Dictionary { { "workspace_id", resourceDefinition.WorkspaceId }, {"workspace_name", resourceDefinition.WorkspaceName} }; } - protected override Task> BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) + protected override async Task> BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) { - throw new NotImplementedException("Batch scraping not yet implemented for log analytics"); + // TODO: these just dispatch and await on tasks that do single-resource scraping. 
Implement integration with Log Analytics batch endpoint + var logScrapingTasks = batchScrapeDefinition.ScrapeDefinitions.Select(definition => ScrapeResourceAsync(subscriptionId, definition, (LogAnalyticsResourceDefinition) definition.Resource, aggregationType, aggregationInterval)).ToList(); + + var resultsList = await Task.WhenAll(logScrapingTasks); + return resultsList.ToList(); } } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs similarity index 100% rename from src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/LogOutgoingRequestsPolicy.cs rename to src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs From 70e4a3f90e1625e1fa32a7a047a8de9a68562820 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 11 Sep 2024 22:27:22 -0700 Subject: [PATCH 067/131] fix bug writing histogram as gauge --- .../Collectors/AzureScrapingSystemMetricsPublisher.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs index c68fb33c5..5970a2beb 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/AzureScrapingSystemMetricsPublisher.cs @@ -58,11 +58,11 @@ public async Task WriteHistogramMeasurementAsync(string name, string description var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); - await _systemMetricsPublisher.WriteGaugeMeasurementAsync(name, description, value, orderedLabels, enableMetricTimestamps); + await _systemMetricsPublisher.WriteHistogramMeasurementAsync(name, 
description, value, orderedLabels, enableMetricTimestamps); } public async Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) { - await _systemMetricsPublisher.WriteGaugeMeasurementAsync(name, description, value, labels, includeTimestamp); + await _systemMetricsPublisher.WriteHistogramMeasurementAsync(name, description, value, labels, includeTimestamp); } } } From de785df470c1c91e5cd18808aef6b1fca69003ad Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 11 Sep 2024 22:33:49 -0700 Subject: [PATCH 068/131] don't throw in OpenTelemetry sink --- .../Collectors/OpenTelemetrySystemMetricsSink.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs index edc484276..03a2de94e 100644 --- a/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs @@ -23,7 +23,7 @@ public async Task WriteGaugeMeasurementAsync(string name, string description, do public Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) { - throw new System.NotImplementedException("Histogram measurement not yet supported in OpenTelemetry sink"); + return null; } } } From 0b463e7bf1b0216be0f7cad00744113d1a75d3e3 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 11 Sep 2024 22:47:10 -0700 Subject: [PATCH 069/131] don't throw in OpenTelemetry sink --- .../Collectors/OpenTelemetrySystemMetricsSink.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs index 
03a2de94e..fc57a5116 100644 --- a/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.OpenTelemetry/Collectors/OpenTelemetrySystemMetricsSink.cs @@ -23,7 +23,7 @@ public async Task WriteGaugeMeasurementAsync(string name, string description, do public Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) { - return null; + return Task.CompletedTask; } } } From 082a6e6d046238a57d1b49222d0d2238faf75b4f Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 11 Sep 2024 22:59:01 -0700 Subject: [PATCH 070/131] set better buckets for batch size --- .../Collectors/PrometheusSystemMetricsSink.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs index ce47c5216..636a2e0f5 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs @@ -50,7 +50,7 @@ public Task WriteHistogramMeasurementAsync(string name, string description, doub var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); // TODO: are histogram instruments created on every invocation? Would that interfere with correctness? 
- var histogram = _metricFactory.CreateHistogram(name, help: description, includeTimestamp: includeTimestamp, labelNames: orderedLabels.Keys.ToArray()); + var histogram = _metricFactory.CreateHistogram(name, help: description, includeTimestamp: includeTimestamp, labelNames: orderedLabels.Keys.ToArray(), buckets: [1, 2, 4, 8, 16, 32, 64]); histogram.WithLabels(orderedLabels.Values.ToArray()).Observe(value); return Task.CompletedTask; } From beab4e631767ed1315ad570fdf48c6c037af09d5 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 12 Sep 2024 12:18:43 -0700 Subject: [PATCH 071/131] correct logic to determine LogAnalytics aggregation interval --- .../Model/Metrics/ScrapeDefinition.cs | 1 + .../Model/Metrics/ScrapeDefinitionBatch.cs | 11 +++++ .../ScrapeDefinitionBatchPropertiesTest.cs | 48 +++++++++++++------ .../AzureResourceDefinitionBatchingTests.cs | 17 +++++-- 4 files changed, 58 insertions(+), 19 deletions(-) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs index 2e4ddb1c1..375ef43ac 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs @@ -98,6 +98,7 @@ public ScrapeDefinition( public ScrapeDefinitionBatchProperties buildPropertiesForBatch() { return new ScrapeDefinitionBatchProperties( this.AzureMetricConfiguration, + this.LogAnalyticsConfiguration, this.PrometheusMetricDefinition, this.Resource.ResourceType, this.Scraping, diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index 74281800b..9d3209459 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -21,6 +21,7 @@ public class 
ScrapeDefinitionBatchProperties : IEquatable public ScrapeDefinitionBatchProperties( AzureMetricConfiguration azureMetricConfiguration, + LogAnalyticsConfiguration logAnalyticsConfiguration, PrometheusMetricDefinition prometheusMetricDefinition, ResourceType resourceType, Scraping scraping, @@ -32,6 +33,7 @@ public ScrapeDefinitionBatchProperties( Guard.NotNull(subscriptionId, nameof(subscriptionId)); AzureMetricConfiguration = azureMetricConfiguration; + LogAnalyticsConfiguration = logAnalyticsConfiguration; PrometheusMetricDefinition = prometheusMetricDefinition; Scraping = scraping; SubscriptionId = subscriptionId; @@ -43,6 +45,11 @@ public ScrapeDefinitionBatchProperties( /// public AzureMetricConfiguration AzureMetricConfiguration { get; } + /// + /// Configuration about the Azure Monitor log analytics resource to scrape + /// + public LogAnalyticsConfiguration LogAnalyticsConfiguration { get; } + /// /// The details of the prometheus metric that will be created. /// @@ -67,6 +74,10 @@ public ScrapeDefinitionBatchProperties( public TimeSpan? 
GetAggregationInterval() { + if (ResourceType == ResourceType.LogAnalytics) + { + return LogAnalyticsConfiguration?.Aggregation?.Interval; + } return AzureMetricConfiguration?.Aggregation?.Interval; } diff --git a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs index bac524edb..38312041e 100644 --- a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs +++ b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs @@ -31,6 +31,14 @@ public class ScrapeDefinitionBatchPropertiesTest Type = PromitorMetricAggregationType.Average }, }; + private readonly static LogAnalyticsConfigurationV1 _logAnalyticsConfigurationBase = new LogAnalyticsConfigurationV1 + { + Query = "A eq B", + Aggregation = new AggregationV1 + { + Interval = TimeSpan.FromMinutes(60) + }, + }; private readonly static ScrapingV1 _scrapingBase = new ScrapingV1 { Schedule = "5 4 3 2 1" @@ -46,9 +54,11 @@ public ScrapeDefinitionBatchPropertiesTest() public void BuildBatchHashKeySameResultNoDimensions() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: 
azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -60,11 +70,12 @@ public void BuildBatchHashKeySameResultIdenticalDimensions() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); azureMetricConfiguration.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); var scraping = _mapper.Map(_scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, 
subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -78,11 +89,12 @@ public void BuildBatchHashKeyDifferentResultDifferentDimensions() azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationBase); azureMetricConfiguration2.Dimensions = [new MetricDimension{Name = "DiffDimension1"}, new MetricDimension{Name = "DiffDimension2"}]; + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); var scraping = _mapper.Map(_scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new 
ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -92,16 +104,17 @@ public void BuildBatchHashKeyDifferentResultDifferentDimensions() [Fact] public void BuildBatchHashKeyDifferentResultDifferentMetricName() { - var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationBase); + var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationBase); azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationBase); azureMetricConfiguration2.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; azureMetricConfiguration2.MetricName = "diffName"; + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); var scraping = _mapper.Map(_scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, 
prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -113,9 +126,11 @@ public void BuildBatchHashKeyDifferentResultDifferentSubscription() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: "subscription2"); + + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: 
logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: "subscription2"); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -127,9 +142,10 @@ public void BuildBatchHashKeyDifferentResultDifferentResourceType() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); var scraping = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.LoadBalancer, scraping: scraping, subscriptionId: "subscription2"); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.LoadBalancer, scraping: scraping, subscriptionId: "subscription2"); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -143,9 +159,11 @@ public void 
BuildBatchHashKeyDifferentResultDifferentSchedule() var scraping1 = _mapper.Map(_scrapingBase); var scraping2 = _mapper.Map(_scrapingBase); scraping2.Schedule = "6 4 3 2 1"; + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping1, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping2, subscriptionId: "subscription2"); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping1, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping2, subscriptionId: "subscription2"); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs index 0fa11d051..f552f26f6 100644 --- a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs +++ 
b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -30,6 +30,14 @@ public class AzureResourceDefinitionBatchingTests Type = PromitorMetricAggregationType.Average }, }; + private readonly static LogAnalyticsConfigurationV1 _logAnalyticsConfigurationBase = new LogAnalyticsConfigurationV1 + { + Query = "A eq B", + Aggregation = new AggregationV1 + { + Interval = TimeSpan.FromMinutes(60) + }, + }; private readonly static ScrapingV1 _scrapingBase = new ScrapingV1 { Schedule = "5 4 3 2 1" @@ -48,7 +56,7 @@ public void IdenticalBatchPropertiesShouldBatchTogether() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 @@ -63,8 +71,9 @@ public void IdenticalBatchPropertiesShouldBatchTogether() public void BatchShouldSplitAccordingToConfiguredBatchSize() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); var scrapeDefinitions = BuildScrapeDefinitionBatch( azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 130 @@ -80,7 +89,7 
@@ public void DifferentBatchPropertiesShouldBatchSeparately() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 @@ -103,8 +112,8 @@ public void DifferentAggregationIntervalsShouldBatchSeparately() azureMetricConfiguration5mInterval.Aggregation.Interval = TimeSpan.FromMinutes(5); var azureMetricConfiguration2mInterval = _mapper.Map(_azureMetricConfigurationBase); azureMetricConfiguration5mInterval.Aggregation.Interval = TimeSpan.FromMinutes(2); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); var scrapeDefinitions5m = BuildScrapeDefinitionBatch( azureMetricConfiguration: azureMetricConfiguration5mInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 From 6c721abf8821485dc0ec72db3f8e7eb576055fb2 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 12 Sep 2024 14:26:47 -0700 Subject: [PATCH 072/131] add more debug logging --- .../Scheduling/ResourcesScrapingJob.cs | 2 +- .../Model/Metrics/ScrapeDefinitionBatch.cs | 2 +- .../ScrapeDefinitionBatchPropertiesTest.cs | 36 +++++++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git 
a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index dc98748d2..7fd4a8459 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -266,7 +266,7 @@ private async Task ScrapeMetrics(IEnumerable definition in batchScrapeDefinition.ScrapeDefinitions) { - Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}", definition.Resource.ResourceName, definition.ResourceGroupName); + Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.buildPropertiesForBatch().BuildBatchHashKey()); } await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index 9d3209459..7ba5df390 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -90,7 +90,7 @@ public override int GetHashCode() /// Builds a namespaced string key to satisfy batch restrictions, in the format of /// ___ /// - private string BuildBatchHashKey() + public string BuildBatchHashKey() { return string.Join("_", new List {AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), GetAggregationInterval().ToString()}); } diff --git a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs index 38312041e..defafb36e 100644 --- 
a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs +++ b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs @@ -169,5 +169,41 @@ public void BuildBatchHashKeyDifferentResultDifferentSchedule() var hashCode2 = batchProperties2.GetHashCode(); Assert.NotEqual(hashCode1, hashCode2); } + + [Fact] + public void BuildBatchHashKeyTest() + { + AzureMetricConfigurationV1 _azureMetricConfigurationTest1 = new AzureMetricConfigurationV1 + { + MetricName = "availabilityResults/availabilityPercentage", + Aggregation = new MetricAggregationV1 + { + Type = PromitorMetricAggregationType.Average + }, + }; + AzureMetricConfigurationV1 _azureMetricConfigurationTest2 = new AzureMetricConfigurationV1 + { + MetricName = "availabilityResults/availabilityPercentage", + Dimensions = [new MetricDimensionV1{Name = "availabilityResult/name"}], + Aggregation = new MetricAggregationV1 + { + Type = PromitorMetricAggregationType.Average + }, + }; + var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationTest1); + var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationTest2); + + var scraping1 = _mapper.Map(_scrapingBase); + var scraping2 = _mapper.Map(_scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + + + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.ApplicationInsights, scraping: scraping1, subscriptionId: _subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.ApplicationInsights, scraping: scraping2, subscriptionId: 
_subscriptionId); + + var hashCode1 = batchProperties.GetHashCode(); + var hashCode2 = batchProperties2.GetHashCode(); + Assert.NotEqual(hashCode1, hashCode2); + } } } \ No newline at end of file From 5d8b5efc9ee11b3dbdfcd745421f3c33fb1ec523 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 12 Sep 2024 14:53:11 -0700 Subject: [PATCH 073/131] handle single dimension in batching logic --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 2 +- .../Configuration/Model/AzureMetricConfiguration.cs | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 87c20d5db..def55a86b 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -134,7 +134,7 @@ protected override async Task> BatchScrapeResourceAsync(strin var scrapeResults = new List(); // group based on resource, then to enrichment per group - var groupedMeasuredMetrics = resourceIdTaggedMeasuredMetrics.GroupBy(measureMetric => measureMetric.ResourceId); + var groupedMeasuredMetrics = resourceIdTaggedMeasuredMetrics.GroupBy(measuredMetric => measuredMetric.ResourceId); foreach (IGrouping resourceMetricsGroup in groupedMeasuredMetrics) { var resourceId = resourceMetricsGroup.Key; diff --git a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs index ad06bd44b..dce9cb45e 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs @@ -52,7 +52,13 @@ public string ToUniqueStringRepresentation() { StringBuilder sb = new StringBuilder(); sb.Append(MetricName); - if (Dimensions != null) { + if (Dimension != null) + { + sb.Append("_"); + sb.Append(Dimension.Name); + } + else if (Dimensions != null) + { foreach (var dimension in 
Dimensions) { sb.Append("_"); From fc0362d05cfcd100a2a757f931def5ded1f7f2b8 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 12 Sep 2024 15:10:42 -0700 Subject: [PATCH 074/131] account for limit --- .../Configuration/Model/AzureMetricConfiguration.cs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs index dce9cb45e..e7e20439d 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs @@ -65,6 +65,8 @@ public string ToUniqueStringRepresentation() sb.Append(dimension.Name); } } + sb.Append($"_limit{Limit}"); + return sb.ToString(); } From 4dff71b9193fd6bba18c9860977ccca246ad2dc3 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 12 Sep 2024 15:44:43 -0700 Subject: [PATCH 075/131] add debug logging for resource ID metrics --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index def55a86b..f1f5bf567 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -121,6 +121,10 @@ protected override async Task> BatchScrapeResourceAsync(strin // Query Azure Monitor for metrics Logger.LogWarning("Querying Azure Monitor for metric {MetricName} with batch size {BatchSize}", metricName, resourceUriList.Count); resourceIdTaggedMeasuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, metricFilter, metricLimit); + foreach (var resourceMetric in resourceIdTaggedMeasuredMetrics) + { + Logger.LogWarning("Discovered value {Value} for metric {Metric} and resource ID {ResourceID}", resourceMetric.Value, metricName, 
resourceMetric.ResourceId); + } } catch (MetricInformationNotFoundException metricsNotFoundException) { From cbc7e06b8b717c55ddbf19d940e8838dd0c877be Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 12 Sep 2024 20:36:52 -0700 Subject: [PATCH 076/131] add forward slash in front of resource ID --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index f1f5bf567..337aa204f 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -94,7 +94,7 @@ protected override async Task> BatchScrapeResourceAsync(strin var resourceUriList = new List(); foreach (ScrapeDefinition scrapeDefinition in batchScrapeDefinition.ScrapeDefinitions) { - var resourceUri = BuildResourceUri(subscriptionId, scrapeDefinition, (TResourceDefinition) scrapeDefinition.Resource); + var resourceUri = $"/{BuildResourceUri(subscriptionId, scrapeDefinition, (TResourceDefinition) scrapeDefinition.Resource)}"; resourceUriList.Add(resourceUri); // cache resource info if (!_resourceDefinitions.ContainsKey(resourceUri)) From 30aa1534a05a40b644760df1ae14c49d51e8118e Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 12 Sep 2024 21:06:10 -0700 Subject: [PATCH 077/131] improve regex matching --- .../Extensions/MetricResultExtension.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs index bb5c16e77..40a15d4ab 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -7,7 +7,7 @@ namespace Promitor.Integrations.AzureMonitor.Extensions public static class MetricResultExtension { // hacky to to get resource ID since 
it's not available directly through the SDK model - static string resourceIdPattern = @"^(subscriptions\/[^\/]+\/resourceGroups\/[^\/]+\/providers\/[^\/]+\/[^\/]+\/[^\/]+)"; + static string resourceIdPattern = @"^([\/]?subscriptions\/[^\/]+\/resourceGroups\/[^\/]+\/providers\/[^\/]+\/[^\/]+\/[^\/]+)"; static Regex resourceIdRegex = new Regex(resourceIdPattern, RegexOptions.Compiled); public static string ParseResourceIdFromResultId(this MetricResult metricResult) From b77903d648ff21a54649b566d544c25196f54a9f Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 17 Sep 2024 14:10:56 -0700 Subject: [PATCH 078/131] add more debug logging --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 337aa204f..e50f7b7ec 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -137,7 +137,7 @@ protected override async Task> BatchScrapeResourceAsync(strin } var scrapeResults = new List(); - // group based on resource, then to enrichment per group + // group based on resource, then do enrichment per group var groupedMeasuredMetrics = resourceIdTaggedMeasuredMetrics.GroupBy(measuredMetric => measuredMetric.ResourceId); foreach (IGrouping resourceMetricsGroup in groupedMeasuredMetrics) { @@ -145,6 +145,7 @@ protected override async Task> BatchScrapeResourceAsync(strin _resourceDefinitions.TryGetValue(resourceId, out IAzureResourceDefinition resourceDefinition); var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetricsGroup.ToImmutableList()); + Logger.LogWarning("Processing {MetricsCount} measured metrics for Metric {MetricName}, resourceID 
{ResourceId} with name {ResourceName}, of resource group {ResourceGroup}", finalMetricValues.Count, metricName, resourceId, resourceDefinition.ResourceName, resourceDefinition.ResourceGroupName); scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); } From a2da4a5c29d4f7101898dcfa135e872dfa39bfe1 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 17 Sep 2024 16:34:54 -0700 Subject: [PATCH 079/131] use configured max batch size --- .../Batching/AzureResourceDefinitionBatching.cs | 4 ++-- .../Batching/AzureResourceDefinitionBatchingTests.cs | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index f3eb70c8f..d1d7d6813 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -29,10 +29,10 @@ public static List> GroupScrapeD /// private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize, CancellationToken cancellationToken) { - int numNewGroups = (batchToSplit.Count - 1) / 50 + 1; + int numNewGroups = (batchToSplit.Count - 1) / maxBatchSize + 1; return Enumerable.Range(0, numNewGroups) - .Select(i => batchToSplit.Skip(i * 50).Take(50).ToList()) + .Select(i => batchToSplit.Skip(i * maxBatchSize).Take(maxBatchSize).ToList()) .ToList(); } } diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs index f552f26f6..f466cf619 100644 --- 
a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs +++ b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -72,16 +72,17 @@ public void BatchShouldSplitAccordingToConfiguredBatchSize() { var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var testBatchSize = 10; var scraping = _mapper.Map(_scrapingBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 130 + resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 25 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: _batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize, CancellationToken.None); // expect three batches adding up to total size Assert.Equal(3, groupedScrapeDefinitions.Count); - Assert.Equal(130, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); + Assert.Equal(25, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); } [Fact] From 124acbc109083e85f25cd61e79a5f4f1f057b401 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 18 Sep 2024 13:07:45 -0700 Subject: [PATCH 080/131] use cached resource definition --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index e50f7b7ec..76bbfa334 100644 --- 
a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -106,7 +106,7 @@ protected override async Task> BatchScrapeResourceAsync(strin subscriptionId: scrapeDefinition.SubscriptionId, resourceName: scrapeDefinition.Resource.ResourceName ); // the resource definition attached is missing some attributes, filling them in here - Logger.LogWarning("Caching resource group {Group} for {ResourceId}", resourceDefinitionToCache.ResourceGroupName, resourceUri); + Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, resource group {ResourceGroup}, for {ResourceId}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceDefinitionToCache.ResourceGroupName, resourceUri); _resourceDefinitions.TryAdd(resourceUri, resourceDefinitionToCache); } } @@ -143,8 +143,8 @@ protected override async Task> BatchScrapeResourceAsync(strin { var resourceId = resourceMetricsGroup.Key; _resourceDefinitions.TryGetValue(resourceId, out IAzureResourceDefinition resourceDefinition); - var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); - var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetricsGroup.ToImmutableList()); + var metricLabels = DetermineMetricLabels((TResourceDefinition) resourceDefinition); + var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) resourceDefinition, dimensionNames, resourceMetricsGroup.ToImmutableList()); Logger.LogWarning("Processing {MetricsCount} measured metrics for Metric {MetricName}, resourceID {ResourceId} with name {ResourceName}, of resource group {ResourceGroup}", finalMetricValues.Count, metricName, resourceId, resourceDefinition.ResourceName, resourceDefinition.ResourceGroupName); 
scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); From c5fd2be2faafd7c2e1b8fc96def3acd643af39c1 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 18 Sep 2024 13:21:43 -0700 Subject: [PATCH 081/131] go back --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 76bbfa334..927d5762c 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -106,7 +106,7 @@ protected override async Task> BatchScrapeResourceAsync(strin subscriptionId: scrapeDefinition.SubscriptionId, resourceName: scrapeDefinition.Resource.ResourceName ); // the resource definition attached is missing some attributes, filling them in here - Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, resource group {ResourceGroup}, for {ResourceId}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceDefinitionToCache.ResourceGroupName, resourceUri); + Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, resource group {ResourceGroup}, for {ResourceId}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceDefinitionToCache, resourceUri); _resourceDefinitions.TryAdd(resourceUri, resourceDefinitionToCache); } } @@ -143,9 +143,9 @@ protected override async Task> BatchScrapeResourceAsync(strin { var resourceId = 
resourceMetricsGroup.Key; _resourceDefinitions.TryGetValue(resourceId, out IAzureResourceDefinition resourceDefinition); - var metricLabels = DetermineMetricLabels((TResourceDefinition) resourceDefinition); - var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) resourceDefinition, dimensionNames, resourceMetricsGroup.ToImmutableList()); - Logger.LogWarning("Processing {MetricsCount} measured metrics for Metric {MetricName}, resourceID {ResourceId} with name {ResourceName}, of resource group {ResourceGroup}", finalMetricValues.Count, metricName, resourceId, resourceDefinition.ResourceName, resourceDefinition.ResourceGroupName); + var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); + var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetricsGroup.ToImmutableList()); + Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId}", finalMetricValues.Count, metricName, resourceId, resourceDefinition.ResourceName, resourceDefinition.ResourceGroupName); scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); } From 51e589f37bb3391fef6fa24dac7e9deb7273427a Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 18 Sep 2024 13:22:58 -0700 Subject: [PATCH 082/131] go back --- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 927d5762c..69a0de673 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ 
b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -145,7 +145,7 @@ protected override async Task> BatchScrapeResourceAsync(strin _resourceDefinitions.TryGetValue(resourceId, out IAzureResourceDefinition resourceDefinition); var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetricsGroup.ToImmutableList()); - Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId}", finalMetricValues.Count, metricName, resourceId, resourceDefinition.ResourceName, resourceDefinition.ResourceGroupName); + Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId}", finalMetricValues.Count, resourceId); scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); } From 376062910a031bd1053b5eee78e13c86ef008ae0 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 18 Sep 2024 17:29:30 -0700 Subject: [PATCH 083/131] use an older collector version --- .../templates/agents/run-opentelemetry-collector.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/azure-devops/templates/agents/run-opentelemetry-collector.yml b/build/azure-devops/templates/agents/run-opentelemetry-collector.yml index d418fa493..c4a5cc23c 100644 --- a/build/azure-devops/templates/agents/run-opentelemetry-collector.yml +++ b/build/azure-devops/templates/agents/run-opentelemetry-collector.yml @@ -34,7 +34,7 @@ steps: displayName: 'Show OpenTelemetry configuration' - script: | echo Mounting volumes: ${{ parameters.volumes }} - docker run -d -p 8888:8888 -p 
8889:8889 --name ${{ parameters.containerName }} $(networkArgument) --volume ${{ parameters.volumes }} otel/opentelemetry-collector --config /etc/otel-collector-config.yaml + docker run -d -p 8888:8888 -p 8889:8889 --name ${{ parameters.containerName }} $(networkArgument) --volume ${{ parameters.volumes }} otel/opentelemetry-collector:0.103.0 --config /etc/otel-collector-config.yaml sleep 10 docker logs ${{ parameters.containerName }} displayName: Run OpenTelemetry Collector as ${{ parameters.containerName }} container From 4853b3e768e91054dafce41e1788f4f2b1265be7 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 18 Sep 2024 20:39:07 -0700 Subject: [PATCH 084/131] use different string matching --- .../Extensions/MetricResultExtension.cs | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs index 40a15d4ab..10b2ca7ef 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -12,14 +12,31 @@ public static class MetricResultExtension public static string ParseResourceIdFromResultId(this MetricResult metricResult) { - Match match = resourceIdRegex.Match(metricResult.Id); - if (!match.Success || string.IsNullOrEmpty(match.Groups[1].Value)) + // Match match = resourceIdRegex.Match(metricResult.Id); + // if (!match.Success || string.IsNullOrEmpty(match.Groups[1].Value)) + // { + // throw new InvalidOperationException($"The expected resource ID pattern was not found in the input string {metricResult.Id}"); + // } + + // string resourceId = match.Groups[1].Value; + // return resourceId; + return ExtractResourceId(metricResult.Id); + } + + private static string ExtractResourceId(string fullId) + { + // Find the index of the second occurrence of "/providers/" + int firstIndex = 
fullId.IndexOf("/providers/"); + int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1); + + // If the second "/providers/" is found, slice the string up to that point + if (secondIndex != -1) { - throw new InvalidOperationException($"The expected resource ID pattern was not found in the input string {metricResult.Id}"); + return fullId.Substring(0, secondIndex); } - string resourceId = match.Groups[1].Value; - return resourceId; + // If not found, return the full string + return fullId; } } From e2a013fb16aa77736c773585854e4a160fb298b1 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 18 Sep 2024 23:28:52 -0700 Subject: [PATCH 085/131] run prom exporter on localhost --- .../templates/agents/run-opentelemetry-collector.yml | 2 +- config/opentelemetry-collector/collector-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/build/azure-devops/templates/agents/run-opentelemetry-collector.yml b/build/azure-devops/templates/agents/run-opentelemetry-collector.yml index c4a5cc23c..d418fa493 100644 --- a/build/azure-devops/templates/agents/run-opentelemetry-collector.yml +++ b/build/azure-devops/templates/agents/run-opentelemetry-collector.yml @@ -34,7 +34,7 @@ steps: displayName: 'Show OpenTelemetry configuration' - script: | echo Mounting volumes: ${{ parameters.volumes }} - docker run -d -p 8888:8888 -p 8889:8889 --name ${{ parameters.containerName }} $(networkArgument) --volume ${{ parameters.volumes }} otel/opentelemetry-collector:0.103.0 --config /etc/otel-collector-config.yaml + docker run -d -p 8888:8888 -p 8889:8889 --name ${{ parameters.containerName }} $(networkArgument) --volume ${{ parameters.volumes }} otel/opentelemetry-collector --config /etc/otel-collector-config.yaml sleep 10 docker logs ${{ parameters.containerName }} displayName: Run OpenTelemetry Collector as ${{ parameters.containerName }} container diff --git a/config/opentelemetry-collector/collector-config.yaml 
b/config/opentelemetry-collector/collector-config.yaml index 5efc22f2a..022ce91a2 100644 --- a/config/opentelemetry-collector/collector-config.yaml +++ b/config/opentelemetry-collector/collector-config.yaml @@ -5,7 +5,7 @@ receivers: exporters: prometheus: - endpoint: "0.0.0.0:8889" + endpoint: "localhost:8889" namespace: otel const_labels: source_app: promitor From c29399adeb5138f872aa6106b5484fe50e9abcf7 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 18 Sep 2024 23:59:01 -0700 Subject: [PATCH 086/131] try insecure flag --- config/opentelemetry-collector/collector-config.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/config/opentelemetry-collector/collector-config.yaml b/config/opentelemetry-collector/collector-config.yaml index 022ce91a2..3ce975037 100644 --- a/config/opentelemetry-collector/collector-config.yaml +++ b/config/opentelemetry-collector/collector-config.yaml @@ -2,10 +2,11 @@ receivers: otlp: protocols: grpc: - + tls: + insecure: true exporters: prometheus: - endpoint: "localhost:8889" + endpoint: "0.0.0.0:8889" namespace: otel const_labels: source_app: promitor From 027254b2c530db6dd7d6180c4f3032e67a112624 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 19 Sep 2024 15:04:08 -0700 Subject: [PATCH 087/131] fix style issues --- .../ResourceTypes/DataShareScraper.cs | 1 - src/Promitor.Core.Scraping/Scraper.cs | 1 - .../ScrapeDefinitionBatchPropertiesTest.cs | 104 +++++++++-------- .../AzureResourceDefinitionBatchingTests.cs | 106 +++++++++--------- 4 files changed, 103 insertions(+), 109 deletions(-) diff --git a/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs b/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs index 4cc5bdcab..efe88ec5c 100644 --- a/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs +++ b/src/Promitor.Core.Scraping/ResourceTypes/DataShareScraper.cs @@ -1,6 +1,5 @@ using System; using System.Collections.Generic; -using System.Collections.Immutable; using System.Linq; using 
Microsoft.Extensions.Logging; using Promitor.Core.Contracts; diff --git a/src/Promitor.Core.Scraping/Scraper.cs b/src/Promitor.Core.Scraping/Scraper.cs index f054dd801..59ccefc3c 100644 --- a/src/Promitor.Core.Scraping/Scraper.cs +++ b/src/Promitor.Core.Scraping/Scraper.cs @@ -308,7 +308,6 @@ protected abstract Task ScrapeResourceAsync( /// /// Metric subscription Id /// Contains all scrape definitions in the batch and their shared properties(like resource type) - /// Contains the resource cast to the specific resource type. /// Aggregation for the metric to use /// Interval that is used to aggregate metrics protected abstract Task> BatchScrapeResourceAsync( diff --git a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs index defafb36e..d9f061fce 100644 --- a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs +++ b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs @@ -1,11 +1,7 @@ using System; using System.Collections.Generic; using System.ComponentModel; -using System.IO; -using System.Runtime.Serialization.Formatters.Binary; using AutoMapper; -using Bogus.DataSets; -using Microsoft.AspNetCore.Mvc.ModelBinding; using Promitor.Core.Metrics; using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Scraping.Configuration.Model.Metrics; @@ -19,19 +15,19 @@ namespace Promitor.Tests.Unit.Core.Metrics public class ScrapeDefinitionBatchPropertiesTest { private readonly IMapper _mapper; // to model instantiation happen - private readonly static string _azureMetricNameBase = "promitor_batch_test_metric"; - private readonly static PrometheusMetricDefinition _prometheusMetricDefinition = + private readonly static string azureMetricNameBase = "promitor_batch_test_metric"; + private readonly static PrometheusMetricDefinition prometheusMetricDefinition = new("promitor_batch_test", "test", new Dictionary()); - 
private readonly static string _subscriptionId = "subscription"; - private readonly static AzureMetricConfigurationV1 _azureMetricConfigurationBase = new AzureMetricConfigurationV1 + private readonly static string subscriptionId = "subscription"; + private readonly static AzureMetricConfigurationV1 azureMetricConfigurationBase = new AzureMetricConfigurationV1 { - MetricName = _azureMetricNameBase, + MetricName = azureMetricNameBase, Aggregation = new MetricAggregationV1 { Type = PromitorMetricAggregationType.Average }, }; - private readonly static LogAnalyticsConfigurationV1 _logAnalyticsConfigurationBase = new LogAnalyticsConfigurationV1 + private readonly static LogAnalyticsConfigurationV1 logAnalyticsConfigurationBase = new LogAnalyticsConfigurationV1 { Query = "A eq B", Aggregation = new AggregationV1 @@ -39,7 +35,7 @@ public class ScrapeDefinitionBatchPropertiesTest Interval = TimeSpan.FromMinutes(60) }, }; - private readonly static ScrapingV1 _scrapingBase = new ScrapingV1 + private readonly static ScrapingV1 scrapingBase = new ScrapingV1 { Schedule = "5 4 3 2 1" }; @@ -53,12 +49,12 @@ public ScrapeDefinitionBatchPropertiesTest() [Fact] public void BuildBatchHashKeySameResultNoDimensions() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new 
ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var scraping = _mapper.Map(scrapingBase); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -68,14 +64,14 @@ public void BuildBatchHashKeySameResultNoDimensions() [Fact] public void BuildBatchHashKeySameResultIdenticalDimensions() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); azureMetricConfiguration.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); + var scraping = _mapper.Map(scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, 
prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -85,16 +81,16 @@ public void BuildBatchHashKeySameResultIdenticalDimensions() [Fact] public void BuildBatchHashKeyDifferentResultDifferentDimensions() { - var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationBase); + var azureMetricConfiguration1 = _mapper.Map(azureMetricConfigurationBase); azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; - var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationBase); + var azureMetricConfiguration2 = _mapper.Map(azureMetricConfigurationBase); azureMetricConfiguration2.Dimensions = [new MetricDimension{Name = "DiffDimension1"}, new 
MetricDimension{Name = "DiffDimension2"}]; - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); + var scraping = _mapper.Map(scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -104,17 +100,17 @@ public void BuildBatchHashKeyDifferentResultDifferentDimensions() [Fact] public void BuildBatchHashKeyDifferentResultDifferentMetricName() { - var azureMetricConfiguration1 = 
_mapper.Map(_azureMetricConfigurationBase); + var azureMetricConfiguration1 = _mapper.Map(azureMetricConfigurationBase); azureMetricConfiguration1.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; - var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationBase); + var azureMetricConfiguration2 = _mapper.Map(azureMetricConfigurationBase); azureMetricConfiguration2.Dimensions = [new MetricDimension{Name = "Dimension1"}, new MetricDimension{Name = "Dimension2"}]; azureMetricConfiguration2.MetricName = "diffName"; - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); + var scraping = _mapper.Map(scrapingBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: 
logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -124,13 +120,13 @@ public void BuildBatchHashKeyDifferentResultDifferentMetricName() [Fact] public void BuildBatchHashKeyDifferentResultDifferentSubscription() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var scraping = _mapper.Map(scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: "subscription2"); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: 
azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: "subscription2"); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -140,12 +136,12 @@ public void BuildBatchHashKeyDifferentResultDifferentSubscription() [Fact] public void BuildBatchHashKeyDifferentResultDifferentResourceType() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var scraping = _mapper.Map(scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.LoadBalancer, scraping: scraping, subscriptionId: "subscription2"); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping, subscriptionId: subscriptionId); + var batchProperties2 = new 
ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.LoadBalancer, scraping: scraping, subscriptionId: "subscription2"); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -155,15 +151,15 @@ public void BuildBatchHashKeyDifferentResultDifferentResourceType() [Fact] public void BuildBatchHashKeyDifferentResultDifferentSchedule() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping1 = _mapper.Map(_scrapingBase); - var scraping2 = _mapper.Map(_scrapingBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var scraping1 = _mapper.Map(scrapingBase); + var scraping2 = _mapper.Map(scrapingBase); scraping2.Schedule = "6 4 3 2 1"; - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping1, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping2, subscriptionId: "subscription2"); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: 
prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping1, subscriptionId: subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.StorageAccount, scraping: scraping2, subscriptionId: "subscription2"); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); @@ -193,13 +189,13 @@ public void BuildBatchHashKeyTest() var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationTest1); var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationTest2); - var scraping1 = _mapper.Map(_scrapingBase); - var scraping2 = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var scraping1 = _mapper.Map(scrapingBase); + var scraping2 = _mapper.Map(scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); - var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.ApplicationInsights, scraping: scraping1, subscriptionId: _subscriptionId); - var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.ApplicationInsights, scraping: scraping2, subscriptionId: _subscriptionId); + var batchProperties = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration1, logAnalyticsConfiguration: 
logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.ApplicationInsights, scraping: scraping1, subscriptionId: subscriptionId); + var batchProperties2 = new ScrapeDefinitionBatchProperties(azureMetricConfiguration: azureMetricConfiguration2, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, resourceType: Promitor.Core.Contracts.ResourceType.ApplicationInsights, scraping: scraping2, subscriptionId: subscriptionId); var hashCode1 = batchProperties.GetHashCode(); var hashCode2 = batchProperties2.GetHashCode(); diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs index f466cf619..4090b4b47 100644 --- a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs +++ b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -18,19 +18,19 @@ namespace Promitor.Tests.Unit.Core.Metrics public class AzureResourceDefinitionBatchingTests { private readonly IMapper _mapper; // to model instantiation happen - private readonly static string _azureMetricNameBase = "promitor_batch_test_metric"; - private readonly static PrometheusMetricDefinition _prometheusMetricDefinition = + private readonly static string azureMetricNameBase = "promitor_batch_test_metric"; + private readonly static PrometheusMetricDefinition prometheusMetricDefinition = new("promitor_batch_test", "test", new Dictionary()); - private readonly static string _subscriptionId = "subscription"; - private readonly static AzureMetricConfigurationV1 _azureMetricConfigurationBase = new AzureMetricConfigurationV1 + private readonly static string subscriptionId = "subscription"; + private readonly static AzureMetricConfigurationV1 azureMetricConfigurationBase = new AzureMetricConfigurationV1 
{ - MetricName = _azureMetricNameBase, + MetricName = azureMetricNameBase, Aggregation = new MetricAggregationV1 { Type = PromitorMetricAggregationType.Average }, }; - private readonly static LogAnalyticsConfigurationV1 _logAnalyticsConfigurationBase = new LogAnalyticsConfigurationV1 + private readonly static LogAnalyticsConfigurationV1 logAnalyticsConfigurationBase = new LogAnalyticsConfigurationV1 { Query = "A eq B", Aggregation = new AggregationV1 @@ -38,12 +38,12 @@ public class AzureResourceDefinitionBatchingTests Interval = TimeSpan.FromMinutes(60) }, }; - private readonly static ScrapingV1 _scrapingBase = new ScrapingV1 + private readonly static ScrapingV1 scrapingBase = new ScrapingV1 { Schedule = "5 4 3 2 1" }; - private readonly static string _resourceGroupName = "batch_test_group"; - private readonly static int _batchSize = 50; + private readonly static string resourceGroupName = "batch_test_group"; + private readonly static int batchSize = 50; public AzureResourceDefinitionBatchingTests() { @@ -54,14 +54,14 @@ public AzureResourceDefinitionBatchingTests() [Fact] public void IdenticalBatchPropertiesShouldBatchTogether() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var scraping = _mapper.Map(scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: 
logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: _batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize, CancellationToken.None); // expect one batch of 10 Assert.Single(groupedScrapeDefinitions); Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); @@ -70,14 +70,14 @@ public void IdenticalBatchPropertiesShouldBatchTogether() [Fact] public void BatchShouldSplitAccordingToConfiguredBatchSize() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); var testBatchSize = 10; - var scraping = _mapper.Map(_scrapingBase); + var scraping = _mapper.Map(scrapingBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 25 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 25 ); var groupedScrapeDefinitions = 
AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize, CancellationToken.None); // expect three batches adding up to total size @@ -88,18 +88,18 @@ public void BatchShouldSplitAccordingToConfiguredBatchSize() [Fact] public void DifferentBatchPropertiesShouldBatchSeparately() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var scraping = _mapper.Map(scrapingBase); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); var differentScrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.BlobStorage, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: 
subscriptionId, resourceGroupName: resourceGroupName, 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: _batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None); // expect two batch of 10 each Assert.Equal(2, groupedScrapeDefinitions.Count); Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); @@ -109,21 +109,21 @@ public void DifferentBatchPropertiesShouldBatchSeparately() [Fact] public void DifferentAggregationIntervalsShouldBatchSeparately() { - var azureMetricConfiguration5mInterval = _mapper.Map(_azureMetricConfigurationBase); - azureMetricConfiguration5mInterval.Aggregation.Interval = TimeSpan.FromMinutes(5); - var azureMetricConfiguration2mInterval = _mapper.Map(_azureMetricConfigurationBase); - azureMetricConfiguration5mInterval.Aggregation.Interval = TimeSpan.FromMinutes(2); - var logAnalyticsConfiguration = _mapper.Map(_logAnalyticsConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); + var azureMetricConfiguration5MInterval = _mapper.Map(azureMetricConfigurationBase); + azureMetricConfiguration5MInterval.Aggregation.Interval = TimeSpan.FromMinutes(5); + var azureMetricConfiguration2MInterval = _mapper.Map(azureMetricConfigurationBase); + azureMetricConfiguration5MInterval.Aggregation.Interval = TimeSpan.FromMinutes(2); + var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); + var scraping = _mapper.Map(scrapingBase); var scrapeDefinitions5m = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration5mInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: 
_subscriptionId, resourceGroupName: _resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration5MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); var differentScrapeDefinitions2m = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration2mInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.BlobStorage, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration2MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: _batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. 
differentScrapeDefinitions2m], maxBatchSize: batchSize, CancellationToken.None); // expect two batch of 10 each Assert.Equal(2, groupedScrapeDefinitions.Count); Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); @@ -134,18 +134,18 @@ public void DifferentAggregationIntervalsShouldBatchSeparately() [Fact] public void MixedBatchShouldSplitAccordingToConfiguredBatchSize() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var scraping = _mapper.Map(scrapingBase); var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 130 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 130 ); var differentScrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.BlobStorage, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 120 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, 
resourceGroupName: resourceGroupName, 120 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: _batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None); // expect two batch of 10 each Assert.Equal(6, groupedScrapeDefinitions.Count); Assert.Equal(250, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); @@ -154,18 +154,18 @@ public void MixedBatchShouldSplitAccordingToConfiguredBatchSize() [Fact] public void BatchConstructionShouldBeAgnosticToResourceGroup() { - var azureMetricConfiguration = _mapper.Map(_azureMetricConfigurationBase); - var scraping = _mapper.Map(_scrapingBase); + var azureMetricConfiguration = _mapper.Map(azureMetricConfigurationBase); + var scraping = _mapper.Map(scrapingBase); var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: _resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); var differentScrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: _prometheusMetricDefinition, scraping: scraping, - resourceType: 
ResourceType.StorageAccount, subscriptionId: _subscriptionId, resourceGroupName: "group2", 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: "group2", 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: _batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None); // expect two batch of 10 each Assert.Single(groupedScrapeDefinitions); Assert.Equal(20, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); From d0c7b41d931d012ea0690358c80b0d89cfb4f80c Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 19 Sep 2024 15:47:06 -0700 Subject: [PATCH 088/131] use associated resource definition during processing --- .../AzureMonitorScraper.cs | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 69a0de673..1ab82e450 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -25,7 +25,7 @@ public abstract class AzureMonitorScraper : Scraper /// A cache to store resource definitions. 
Used to hydrate resource info from resource ID, when processing batch query results /// - private readonly ConcurrentDictionary _resourceDefinitions; // using a dictionary for now since IMemoryCache involves layers of injection + private readonly ConcurrentDictionary> _resourceDefinitions; // using a dictionary for now since IMemoryCache involves layers of injection /// /// Constructor @@ -33,7 +33,7 @@ public abstract class AzureMonitorScraper : Scraper(); + _resourceDefinitions = new ConcurrentDictionary>(); } /// @@ -99,15 +99,16 @@ protected override async Task> BatchScrapeResourceAsync(strin // cache resource info if (!_resourceDefinitions.ContainsKey(resourceUri)) { + // the TResourceDefinition resource definition attached to scrape definition can sometimes missing some attributes, need to them in here var resourceDefinitionToCache = new AzureResourceDefinition ( resourceType: scrapeDefinition.Resource.ResourceType, resourceGroupName: scrapeDefinition.ResourceGroupName, subscriptionId: scrapeDefinition.SubscriptionId, resourceName: scrapeDefinition.Resource.ResourceName - ); // the resource definition attached is missing some attributes, filling them in here - Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, resource group {ResourceGroup}, for {ResourceId}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceDefinitionToCache, resourceUri); - _resourceDefinitions.TryAdd(resourceUri, resourceDefinitionToCache); + ); + Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, for {ResourceId}, of resource type {ResourceType}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceUri, resourceDefinitionToCache.ResourceType); + _resourceDefinitions.TryAdd(resourceUri, new 
Tuple(resourceDefinitionToCache, (TResourceDefinition)scrapeDefinition.Resource)); } } @@ -142,10 +143,11 @@ protected override async Task> BatchScrapeResourceAsync(strin foreach (IGrouping resourceMetricsGroup in groupedMeasuredMetrics) { var resourceId = resourceMetricsGroup.Key; - _resourceDefinitions.TryGetValue(resourceId, out IAzureResourceDefinition resourceDefinition); - var metricLabels = DetermineMetricLabels((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); - var finalMetricValues = EnrichMeasuredMetrics((TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, dimensionNames, resourceMetricsGroup.ToImmutableList()); - Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId}", finalMetricValues.Count, resourceId); + _resourceDefinitions.TryGetValue(resourceId, out Tuple resourceDefinitionTuple); + var resourceDefinition = resourceDefinitionTuple.Item1; + var metricLabels = DetermineMetricLabels(resourceDefinitionTuple.Item2); + var finalMetricValues = EnrichMeasuredMetrics(resourceDefinitionTuple.Item2, dimensionNames, resourceMetricsGroup.ToImmutableList()); + Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId} of resource group {ResourceGroup}", finalMetricValues.Count, resourceId, resourceDefinition.ResourceGroupName); scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); } From 46931e876aaf5aafa83d6485788a7d8c6a1904e2 Mon Sep 17 00:00:00 2001 From: xchen Date: Fri, 20 Sep 2024 09:38:45 -0700 Subject: [PATCH 089/131] don't use resource-specific filters --- .../AzureMonitorScraper.cs | 26 ++++++++----------- 1 file changed, 11 insertions(+), 15 deletions(-) diff 
--git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 1ab82e450..1b4c09fb0 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -97,22 +97,18 @@ protected override async Task> BatchScrapeResourceAsync(strin var resourceUri = $"/{BuildResourceUri(subscriptionId, scrapeDefinition, (TResourceDefinition) scrapeDefinition.Resource)}"; resourceUriList.Add(resourceUri); // cache resource info - if (!_resourceDefinitions.ContainsKey(resourceUri)) - { - // the TResourceDefinition resource definition attached to scrape definition can sometimes missing some attributes, need to them in here - var resourceDefinitionToCache = new AzureResourceDefinition - ( - resourceType: scrapeDefinition.Resource.ResourceType, - resourceGroupName: scrapeDefinition.ResourceGroupName, - subscriptionId: scrapeDefinition.SubscriptionId, - resourceName: scrapeDefinition.Resource.ResourceName - ); - Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, for {ResourceId}, of resource type {ResourceType}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceUri, resourceDefinitionToCache.ResourceType); - _resourceDefinitions.TryAdd(resourceUri, new Tuple(resourceDefinitionToCache, (TResourceDefinition)scrapeDefinition.Resource)); - } + // the TResourceDefinition resource definition attached to scrape definition can sometimes be missing some attributes, need to fill them in here + var resourceDefinitionToCache = new AzureResourceDefinition + ( + resourceType: scrapeDefinition.Resource.ResourceType, + resourceGroupName: scrapeDefinition.ResourceGroupName, + subscriptionId: scrapeDefinition.SubscriptionId, + resourceName: scrapeDefinition.Resource.ResourceName + ); + Logger.LogWarning("Caching resource group {Group}, resource name
{ResourceName}, subscription ID {SubscriptionID}, for {ResourceId}, of resource type {ResourceType}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceUri, resourceDefinitionToCache.ResourceType); + _resourceDefinitions.AddOrUpdate(resourceUri, new Tuple(resourceDefinitionToCache, (TResourceDefinition)scrapeDefinition.Resource), (uri, oldTuple) => oldTuple); } - var metricFilter = DetermineMetricFilter(metricName, (TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource); var metricLimit = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.Limit; var dimensionNames = DetermineMetricDimensions(metricName, (TResourceDefinition) batchScrapeDefinition.ScrapeDefinitions[0].Resource, batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration); // TODO: resource definition doesn't seem to be used, can we remove it from function signature? @@ -121,7 +117,7 @@ protected override async Task> BatchScrapeResourceAsync(strin { // Query Azure Monitor for metrics Logger.LogWarning("Querying Azure Monitor for metric {MetricName} with batch size {BatchSize}", metricName, resourceUriList.Count); - resourceIdTaggedMeasuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, metricFilter, metricLimit); + resourceIdTaggedMeasuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, null, metricLimit); foreach (var resourceMetric in resourceIdTaggedMeasuredMetrics) { Logger.LogWarning("Discovered value {Value} for metric {Metric} and resource ID {ResourceID}", resourceMetric.Value, metricName, resourceMetric.ResourceId); From d88c53bd40c2964aaf8099cd0fe46960615fc99c Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 16:23:31 -0700 Subject: [PATCH 090/131] fix 
style issues --- .../Scheduling/ResourcesScrapingJob.cs | 2 +- src/Promitor.Core.Scraping/AzureMonitorScraper.cs | 2 +- .../Batching/AzureResourceDefinitionBatching.cs | 4 ++-- .../Model/AzureMetricConfiguration.cs | 4 ++-- .../Model/Metrics/BatchScrapeDefinition.cs | 13 ++++--------- .../Model/Metrics/ScrapeDefinition.cs | 2 +- .../Model/Metrics/ScrapeDefinitionBatch.cs | 12 +++--------- .../Extensions/MetricResultExtension.cs | 14 +------------- 8 files changed, 15 insertions(+), 38 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 7fd4a8459..46d56bbbd 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -266,7 +266,7 @@ private async Task ScrapeMetrics(IEnumerable definition in batchScrapeDefinition.ScrapeDefinitions) { - Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.buildPropertiesForBatch().BuildBatchHashKey()); + Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.BuildPropertiesForBatch().BuildBatchHashKey()); } await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 1b4c09fb0..eb5691a35 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -127,7 +127,7 @@ protected override async Task> 
BatchScrapeResourceAsync(strin { Logger.LogWarning("No metric information found for metric {MetricName} with dimensions {MetricDimensions}. Details: {Details}", metricsNotFoundException.Name, metricsNotFoundException.Dimensions, metricsNotFoundException.Details); - var measuredMetric = dimensionNames.Any() + var measuredMetric = dimensionNames.Count > 0 ? MeasuredMetric.CreateForDimensions(dimensionNames) : MeasuredMetric.CreateWithoutDimensions(null); resourceIdTaggedMeasuredMetrics.Add(measuredMetric.WithResourceIdAssociation(null)); diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index d1d7d6813..2e5649bed 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -17,7 +17,7 @@ public static class AzureResourceDefinitionBatching /// public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) { - return allScrapeDefinitions.GroupBy(def => def.buildPropertiesForBatch()) + return allScrapeDefinitions.GroupBy(def => def.BuildPropertiesForBatch()) .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches .SelectMany(group => group.Value.Select(batch => new BatchScrapeDefinition(group.Key, batch))) @@ -29,7 +29,7 @@ public static List> GroupScrapeD /// private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize, CancellationToken cancellationToken) { - int numNewGroups = (batchToSplit.Count - 1) / maxBatchSize + 1; + int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1; return Enumerable.Range(0, numNewGroups) .Select(i => batchToSplit.Skip(i * 
maxBatchSize).Take(maxBatchSize).ToList()) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs index e7e20439d..3e9e1514b 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/AzureMetricConfiguration.cs @@ -54,14 +54,14 @@ public string ToUniqueStringRepresentation() sb.Append(MetricName); if (Dimension != null) { - sb.Append("_"); + sb.Append('_'); sb.Append(Dimension.Name); } else if (Dimensions != null) { foreach (var dimension in Dimensions) { - sb.Append("_"); + sb.Append('_'); sb.Append(dimension.Name); } } diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs index b49e6a34e..bdf5d7a4c 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -11,20 +11,15 @@ namespace Promitor.Core.Scraping.Configuration.Model.Metrics /// 1. The same resource type /// 2. The same Azure metric scrape target with identical dimensions /// 3. The same time granularity + /// 4. The same filters /// public class BatchScrapeDefinition where TResourceDefinition : class, IAzureResourceDefinition { /// - /// Creates a new instance of the class. + /// Creates a new instance of the class. /// - /// Configuration about the Azure Monitor metric to scrape - /// Configuration about the Azure Monitor metric to scrape - /// The scraping model. - /// Specify a subscription to scrape that defers from the default subscription. - /// - /// The name of the resource group containing the resource to scrape. This should contain the global - /// resource group name if none is overridden at the resource level. 
- /// + /// Shared Properties Among ScrapeDefinition's in the batch + /// Scape definitions in the batch public BatchScrapeDefinition(ScrapeDefinitionBatchProperties scrapeDefinitionBatchProperties, List> groupedScrapeDefinitions) { Guard.NotNull(groupedScrapeDefinitions, nameof(scrapeDefinitionBatchProperties)); diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs index 375ef43ac..7c285fa28 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs @@ -95,7 +95,7 @@ public ScrapeDefinition( return AzureMetricConfiguration?.Aggregation?.Interval; } - public ScrapeDefinitionBatchProperties buildPropertiesForBatch() { + public ScrapeDefinitionBatchProperties BuildPropertiesForBatch() { return new ScrapeDefinitionBatchProperties( this.AzureMetricConfiguration, this.LogAnalyticsConfiguration, diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs index 7ba5df390..c98669582 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs @@ -11,14 +11,11 @@ namespace Promitor.Core.Scraping.Configuration.Model.Metrics public class ScrapeDefinitionBatchProperties : IEquatable { /// Configuration about the Azure Monitor metric to scrape + /// Configuration about the LogAnalytics resource to scrape /// The details of the prometheus metric that will be created. /// The scraping model. - /// The resource to scrape. + /// Resource type of the batch /// Specify a subscription to scrape that defers from the default subscription. - /// - /// The name of the resource group containing the resource to scrape. 
This should contain the global - /// resource group name if none is overridden at the resource level. - /// public ScrapeDefinitionBatchProperties( AzureMetricConfiguration azureMetricConfiguration, LogAnalyticsConfiguration logAnalyticsConfiguration, @@ -61,9 +58,7 @@ public ScrapeDefinitionBatchProperties( public Scraping Scraping { get; } /// - /// The Azure subscription to get the metric from. This should be used instead of using - /// the SubscriptionId from because this property will contain - /// the global subscription id if none is overridden at the resource level. + /// The Azure subscription to get the metric from. /// public string SubscriptionId { get; } @@ -106,6 +101,5 @@ public bool Equals(ScrapeDefinitionBatchProperties obj) ScrapeDefinitionBatchProperties other = obj; return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && GetAggregationInterval().Equals(other.GetAggregationInterval()); } - } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs index 10b2ca7ef..cc74151f7 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -7,19 +7,8 @@ namespace Promitor.Integrations.AzureMonitor.Extensions public static class MetricResultExtension { // hacky to to get resource ID since it's not available directly through the SDK model - static string resourceIdPattern = @"^([\/]?subscriptions\/[^\/]+\/resourceGroups\/[^\/]+\/providers\/[^\/]+\/[^\/]+\/[^\/]+)"; - static Regex resourceIdRegex = new Regex(resourceIdPattern, RegexOptions.Compiled); - public static string ParseResourceIdFromResultId(this MetricResult metricResult) { - // Match match = 
resourceIdRegex.Match(metricResult.Id); - // if (!match.Success || string.IsNullOrEmpty(match.Groups[1].Value)) - // { - // throw new InvalidOperationException($"The expected resource ID pattern was not found in the input string {metricResult.Id}"); - // } - - // string resourceId = match.Groups[1].Value; - // return resourceId; return ExtractResourceId(metricResult.Id); } @@ -32,12 +21,11 @@ private static string ExtractResourceId(string fullId) // If the second "/providers/" is found, slice the string up to that point if (secondIndex != -1) { - return fullId.Substring(0, secondIndex); + return fullId[..secondIndex]; } // If not found, return the full string return fullId; } - } } \ No newline at end of file From a853c2687dba8c25e24e94d4b0ff47c4432b4594 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 16:35:47 -0700 Subject: [PATCH 091/131] fix style --- src/Promitor.Core/Extensions/MeasureMetricExtensions.cs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs b/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs index 8edfab2a5..34baeff60 100644 --- a/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs +++ b/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs @@ -4,11 +4,8 @@ namespace Promitor.Core.Extensions { public static class MeasuredMetricExtensions { - /// - /// Get Azure environment information under legacy SDK model - /// - /// Microsoft Azure cloud - /// Azure environment information for specified cloud + /// Resource ID to associate the metric with + /// Instance of MeasuredMetric subclass with resourceId attached public static ResourceAssociatedMeasuredMetric WithResourceIdAssociation(this MeasuredMetric measuredMetric, string resourceId) { return measuredMetric.IsDimensional From 430c72539d16fff4bea7e9f25f8d3d48b96094cd Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 17:21:49 -0700 Subject: [PATCH 092/131] Address comments --- 
.../Scheduling/ResourcesScrapingJob.cs | 2 +- .../Batching/AzureResourceDefinitionBatching.cs | 2 +- .../Configuration/Model/Metrics/BatchScrapeDefinition.cs | 3 ++- .../Configuration/Model/Metrics/ScrapeDefinition.cs | 2 +- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 2 +- .../Extensions/MetricResultExtension.cs | 6 +++--- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 3 +++ 7 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 46d56bbbd..73cb7c01d 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -266,7 +266,7 @@ private async Task ScrapeMetrics(IEnumerable definition in batchScrapeDefinition.ScrapeDefinitions) { - Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.BuildPropertiesForBatch().BuildBatchHashKey()); + Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.BuildScrapingBatchInfo().BuildBatchHashKey()); } await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index 2e5649bed..a920c85e8 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -17,7 +17,7 @@ public 
static class AzureResourceDefinitionBatching /// public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) { - return allScrapeDefinitions.GroupBy(def => def.BuildPropertiesForBatch()) + return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo()) .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches .SelectMany(group => group.Value.Select(batch => new BatchScrapeDefinition(group.Key, batch))) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs index bdf5d7a4c..97f96563f 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -22,8 +22,9 @@ public class BatchScrapeDefinition where TResourceDefinitio /// Scape definitions in the batch public BatchScrapeDefinition(ScrapeDefinitionBatchProperties scrapeDefinitionBatchProperties, List> groupedScrapeDefinitions) { - Guard.NotNull(groupedScrapeDefinitions, nameof(scrapeDefinitionBatchProperties)); Guard.NotNull(groupedScrapeDefinitions, nameof(groupedScrapeDefinitions)); + Guard.NotLessThan(groupedScrapeDefinitions.Count, 1, nameof(groupedScrapeDefinitions)); + Guard.NotNull(scrapeDefinitionBatchProperties, nameof(scrapeDefinitionBatchProperties)); ScrapeDefinitionBatchProperties = scrapeDefinitionBatchProperties; ScrapeDefinitions = groupedScrapeDefinitions; diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs index 7c285fa28..71acf10ba 100644 --- 
a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinition.cs @@ -95,7 +95,7 @@ public ScrapeDefinition( return AzureMetricConfiguration?.Aggregation?.Interval; } - public ScrapeDefinitionBatchProperties BuildPropertiesForBatch() { + public ScrapeDefinitionBatchProperties BuildScrapingBatchInfo() { return new ScrapeDefinitionBatchProperties( this.AzureMetricConfiguration, this.LogAnalyticsConfiguration, diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index b451c1b7b..aa8f2a8cd 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -4,7 +4,7 @@ namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { - public bool Enabled { get; set; } + public bool Enabled { get; set; } = false; public int MaxBatchSize { get; set; } public string AzureRegion { get; set; } = "eastus"; // Batch scrape endpoints are deployed by region } diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs index cc74151f7..b51ea03a4 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -1,12 +1,12 @@ -using System; -using System.Text.RegularExpressions; using Azure.Monitor.Query.Models; namespace Promitor.Integrations.AzureMonitor.Extensions { public static class MetricResultExtension { - // hacky to to get resource ID since it's not available directly through the SDK model + /// + /// hacky to to get resource ID since it's not 
available directly through the SDK model, because the MetricResult model does not have the ResourceID attribute that comes with Response JSON + /// public static string ParseResourceIdFromResultId(this MetricResult metricResult) { return ExtractResourceId(metricResult.Id); diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index f11b0f115..1da811323 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -9,6 +9,9 @@ using Microsoft.Extensions.Logging; namespace Promitor.Integrations.AzureMonitor.HttpPipelinePolicies{ + /// + /// Work around to make sure range queries work properly. + /// public class ModifyOutgoingAzureMonitorRequestsPolicy : HttpPipelinePolicy { private readonly ILogger _logger; From 11d765c3afe5767f6963a9dcf05e4974d3756fe3 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 17:48:22 -0700 Subject: [PATCH 093/131] more style fixes --- .../Scheduling/ResourcesScrapingJob.cs | 2 +- .../AzureResourceDefinitionBatching.cs | 6 +- .../Model/Metrics/BatchScrapeDefinition.cs | 3 +- .../Model/Metrics/ScrapeDefinitionBatch.cs | 105 ------------------ .../AzureMonitorQueryClient.cs | 2 +- .../Extensions/MetricResultExtension.cs | 4 +- ...odifyOutgoingAzureMonitorRequestsPolicy.cs | 4 +- .../Collectors/PrometheusSystemMetricsSink.cs | 1 - .../AzureResourceDefinitionBatchingTests.cs | 12 +- 9 files changed, 16 insertions(+), 123 deletions(-) delete mode 100644 src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 73cb7c01d..1e5a7d834 100644 
--- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -257,7 +257,7 @@ private async Task ScrapeMetrics(IEnumerable - public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize, CancellationToken cancellationToken) + public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize) { return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo()) .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max - .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize, cancellationToken)) // split to right-sized batches + .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize)) // split to right-sized batches .SelectMany(group => group.Value.Select(batch => new BatchScrapeDefinition(group.Key, batch))) .ToList(); // flatten } @@ -27,7 +27,7 @@ public static List> GroupScrapeD /// /// splits the "raw" batch according to max batch size configured /// - private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize, CancellationToken cancellationToken) + private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize) { int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1; diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs index 97f96563f..d5d9052f8 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/BatchScrapeDefinition.cs @@ -1,4 +1,3 @@ -using System; using System.Collections.Generic; using GuardNet; using Promitor.Core.Contracts; @@ -33,7 +32,7 @@ public 
BatchScrapeDefinition(ScrapeDefinitionBatchProperties scrapeDefinitionBat /// /// A batch of scrape job definitions to be executed as a single request /// - public List> ScrapeDefinitions { get; set; } = new List>(); + public List> ScrapeDefinitions { get; set; } public ScrapeDefinitionBatchProperties ScrapeDefinitionBatchProperties { get; set; } } diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs deleted file mode 100644 index c98669582..000000000 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatch.cs +++ /dev/null @@ -1,105 +0,0 @@ -using System; -using System.Collections.Generic; -using GuardNet; -using Promitor.Core.Contracts; - -namespace Promitor.Core.Scraping.Configuration.Model.Metrics -{ - /// - /// Defines properties of a batch of scrape definitions - /// - public class ScrapeDefinitionBatchProperties : IEquatable - { - /// Configuration about the Azure Monitor metric to scrape - /// Configuration about the LogAnalytics resource to scrape - /// The details of the prometheus metric that will be created. - /// The scraping model. - /// Resource type of the batch - /// Specify a subscription to scrape that defers from the default subscription. 
- public ScrapeDefinitionBatchProperties( - AzureMetricConfiguration azureMetricConfiguration, - LogAnalyticsConfiguration logAnalyticsConfiguration, - PrometheusMetricDefinition prometheusMetricDefinition, - ResourceType resourceType, - Scraping scraping, - string subscriptionId) - { - Guard.NotNull(azureMetricConfiguration, nameof(azureMetricConfiguration)); - Guard.NotNull(prometheusMetricDefinition, nameof(prometheusMetricDefinition)); - Guard.NotNull(scraping, nameof(scraping)); - Guard.NotNull(subscriptionId, nameof(subscriptionId)); - - AzureMetricConfiguration = azureMetricConfiguration; - LogAnalyticsConfiguration = logAnalyticsConfiguration; - PrometheusMetricDefinition = prometheusMetricDefinition; - Scraping = scraping; - SubscriptionId = subscriptionId; - ResourceType = resourceType; - } - - /// - /// Configuration about the Azure Monitor metric to scrape - /// - public AzureMetricConfiguration AzureMetricConfiguration { get; } - - /// - /// Configuration about the Azure Monitor log analytics resource to scrape - /// - public LogAnalyticsConfiguration LogAnalyticsConfiguration { get; } - - /// - /// The details of the prometheus metric that will be created. - /// - public PrometheusMetricDefinition PrometheusMetricDefinition { get; } - - /// - /// The scraping model. - /// - public Scraping Scraping { get; } - - /// - /// The Azure subscription to get the metric from. - /// - public string SubscriptionId { get; } - - /// - /// The Azure resource type shared by all scrape definitions in the batch - /// - public ResourceType ResourceType { get; } - - public TimeSpan? 
GetAggregationInterval() - { - if (ResourceType == ResourceType.LogAnalytics) - { - return LogAnalyticsConfiguration?.Aggregation?.Interval; - } - return AzureMetricConfiguration?.Aggregation?.Interval; - } - - public override int GetHashCode() - { - return this.BuildBatchHashKey().GetHashCode(); - } - - /// - /// Builds a namespaced string key to satisfy batch restrictions, in the format of - /// ___ - /// - public string BuildBatchHashKey() - { - return string.Join("_", new List {AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), GetAggregationInterval().ToString()}); - } - - /// - /// Equality comparison override in case of hash collision - /// - public bool Equals(ScrapeDefinitionBatchProperties obj) - { - if (obj == null || !(obj is ScrapeDefinitionBatchProperties)) - return false; - - ScrapeDefinitionBatchProperties other = obj; - return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && GetAggregationInterval().Equals(other.GetAggregationInterval()); - } - } -} \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index b343696a4..1f8f34ea5 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -302,7 +302,7 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud public static string InsertRegionIntoUrl(string region, string baseUrl) { // Find the position where ".metrics" starts in the URL - int metricsIndex = baseUrl.IndexOf("metrics"); + int metricsIndex = baseUrl.IndexOf("metrics", System.StringComparison.Ordinal); // Split the base URL into two parts: before and after the ".metrics" string beforeMetrics = 
baseUrl.Substring(0, metricsIndex); diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs index b51ea03a4..6db3e1c37 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/MetricResultExtension.cs @@ -15,8 +15,8 @@ public static string ParseResourceIdFromResultId(this MetricResult metricResult) private static string ExtractResourceId(string fullId) { // Find the index of the second occurrence of "/providers/" - int firstIndex = fullId.IndexOf("/providers/"); - int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1); + int firstIndex = fullId.IndexOf("/providers/", System.StringComparison.Ordinal); + int secondIndex = fullId.IndexOf("/providers/", firstIndex + 1, System.StringComparison.Ordinal); // If the second "/providers/" is found, slice the string up to that point if (secondIndex != -1) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 1da811323..9db3814c2 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -10,7 +10,7 @@ namespace Promitor.Integrations.AzureMonitor.HttpPipelinePolicies{ /// - /// Work around to make sure range queries work properly. + /// Work around to make sure range queries work properly. 
/// public class ModifyOutgoingAzureMonitorRequestsPolicy : HttpPipelinePolicy { @@ -50,7 +50,7 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) // Update the message with the modified URI } } - message.Request.Uri.Query = query.ToString(); + message.Request.Uri.Query = query.ToString(); } } } diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs index 636a2e0f5..a3c260cb2 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs @@ -49,7 +49,6 @@ public Task WriteHistogramMeasurementAsync(string name, string description, doub { var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); - // TODO: are histogram instruments created on every invocation? Would that interfere with correctness? 
var histogram = _metricFactory.CreateHistogram(name, help: description, includeTimestamp: includeTimestamp, labelNames: orderedLabels.Keys.ToArray(), buckets: [1, 2, 4, 8, 16, 32, 64]); histogram.WithLabels(orderedLabels.Values.ToArray()).Observe(value); return Task.CompletedTask; diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs index 4090b4b47..68a674481 100644 --- a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs +++ b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -61,7 +61,7 @@ public void IdenticalBatchPropertiesShouldBatchTogether() azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize); // expect one batch of 10 Assert.Single(groupedScrapeDefinitions); Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); @@ -79,7 +79,7 @@ public void BatchShouldSplitAccordingToConfiguredBatchSize() azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 25 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: 
testBatchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize); // expect three batches adding up to total size Assert.Equal(3, groupedScrapeDefinitions.Count); Assert.Equal(25, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); @@ -99,7 +99,7 @@ public void DifferentBatchPropertiesShouldBatchSeparately() azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize); // expect two batch of 10 each Assert.Equal(2, groupedScrapeDefinitions.Count); Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); @@ -123,7 +123,7 @@ public void DifferentAggregationIntervalsShouldBatchSeparately() azureMetricConfiguration: azureMetricConfiguration2MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. 
differentScrapeDefinitions2m], maxBatchSize: batchSize); // expect two batch of 10 each Assert.Equal(2, groupedScrapeDefinitions.Count); Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); @@ -145,7 +145,7 @@ public void MixedBatchShouldSplitAccordingToConfiguredBatchSize() azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 120 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize); // expect two batch of 10 each Assert.Equal(6, groupedScrapeDefinitions.Count); Assert.Equal(250, CountTotalScrapeDefinitions(groupedScrapeDefinitions)); @@ -165,7 +165,7 @@ public void BatchConstructionShouldBeAgnosticToResourceGroup() azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: "group2", 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize, CancellationToken.None); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. 
differentScrapeDefinitions], maxBatchSize: batchSize); // expect two batch of 10 each Assert.Single(groupedScrapeDefinitions); Assert.Equal(20, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); From 5d3868473f23c113834c416b4d5b68fbee04842f Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 17:54:24 -0700 Subject: [PATCH 094/131] consolidate some logging --- .../Scheduling/ResourcesScrapingJob.cs | 7 +- .../ScrapeDefinitionBatchProperties.cs | 105 ++++++++++++++++++ 2 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index 1e5a7d834..f01c2b2cf 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -254,9 +254,10 @@ private async Task ScrapeMetrics(IEnumerable(); var batchScrapingEnabled = this._azureMonitorIntegrationConfiguration.Value.MetricsBatching?.Enabled ?? 
false; - Logger.LogInformation("Parsed batch config: {Enabled}, {BatchSize}", this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.Enabled, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize); - Logger.LogInformation("Parsed SDK runtime config {Enabled}", this._azureMonitorIntegrationConfiguration.Value.UseAzureMonitorSdk); if (batchScrapingEnabled) { + Logger.LogInformation("Promitor Scraper will operate in batch scraping mode, with max batch size {BatchSize}", this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize); + Logger.LogWarning(""); + var batchScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, this._azureMonitorIntegrationConfiguration.Value.MetricsBatching.MaxBatchSize); foreach(var batchScrapeDefinition in batchScrapeDefinitions) { @@ -266,7 +267,7 @@ private async Task ScrapeMetrics(IEnumerable definition in batchScrapeDefinition.ScrapeDefinitions) { - Logger.LogInformation("ResourceID: {ResoureceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.BuildScrapingBatchInfo().BuildBatchHashKey()); + Logger.LogInformation("ResourceID: {ResourceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.BuildScrapingBatchInfo().BuildBatchHashKey()); } await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs new file mode 100644 index 000000000..b3f8868bf --- /dev/null +++ 
b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs @@ -0,0 +1,105 @@ +using System; +using System.Collections.Generic; +using GuardNet; +using Promitor.Core.Contracts; + +namespace Promitor.Core.Scraping.Configuration.Model.Metrics +{ + /// + /// Defines properties of a batch of scrape definitions + /// + public class ScrapeDefinitionBatchProperties : IEquatable + { + /// Configuration about the Azure Monitor metric to scrape + /// Configuration about the LogAnalytics resource to scrape + /// The details of the prometheus metric that will be created. + /// The scraping model. + /// Resource type of the batch + /// Specify a subscription to scrape that differs from the default subscription. + public ScrapeDefinitionBatchProperties( + AzureMetricConfiguration azureMetricConfiguration, + LogAnalyticsConfiguration logAnalyticsConfiguration, + PrometheusMetricDefinition prometheusMetricDefinition, + ResourceType resourceType, + Scraping scraping, + string subscriptionId) + { + Guard.NotNull(azureMetricConfiguration, nameof(azureMetricConfiguration)); + Guard.NotNull(prometheusMetricDefinition, nameof(prometheusMetricDefinition)); + Guard.NotNull(scraping, nameof(scraping)); + Guard.NotNull(subscriptionId, nameof(subscriptionId)); + + AzureMetricConfiguration = azureMetricConfiguration; + LogAnalyticsConfiguration = logAnalyticsConfiguration; + PrometheusMetricDefinition = prometheusMetricDefinition; + Scraping = scraping; + SubscriptionId = subscriptionId; + ResourceType = resourceType; + } + + /// + /// Configuration about the Azure Monitor metric to scrape + /// + public AzureMetricConfiguration AzureMetricConfiguration { get; } + + /// + /// Configuration about the Azure Monitor log analytics resource to scrape + /// + public LogAnalyticsConfiguration LogAnalyticsConfiguration { get; } + + /// + /// The details of the prometheus metric that will be created. 
+ /// + public PrometheusMetricDefinition PrometheusMetricDefinition { get; } + + /// + /// The scraping model. + /// + public Scraping Scraping { get; } + + /// + /// The Azure subscription to get the metric from. + /// + public string SubscriptionId { get; } + + /// + /// The Azure resource type shared by all scrape definitions in the batch + /// + public ResourceType ResourceType { get; } + + public TimeSpan? GetAggregationInterval() + { + if (ResourceType == ResourceType.LogAnalytics) + { + return LogAnalyticsConfiguration?.Aggregation?.Interval; + } + return AzureMetricConfiguration?.Aggregation?.Interval; + } + + public override int GetHashCode() + { + return this.BuildBatchHashKey().GetHashCode(); + } + + /// + /// Builds a namespaced string key to satisfy batch restrictions, in the format of + /// (AzureMetricAndDimensionsAndFilter)_(SubscriptionId)_(ResourceType)_(AggregationInterval>) + /// + public string BuildBatchHashKey() + { + return string.Join("_", new List {AzureMetricConfiguration.ToUniqueStringRepresentation(), SubscriptionId, ResourceType.ToString(), GetAggregationInterval().ToString()}); + } + + /// + /// Equality comparison override in case of hash collision + /// + public bool Equals(ScrapeDefinitionBatchProperties obj) + { + if (obj is null || !(obj is ScrapeDefinitionBatchProperties)) + return false; + + ScrapeDefinitionBatchProperties other = obj; + return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && GetAggregationInterval().Equals(other.GetAggregationInterval()); + } + } +} \ No newline at end of file From b3eb049f1f4f58592aa6f15e514c0aa01fe04315 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 18:25:01 -0700 Subject: [PATCH 095/131] more style fixes :( --- .../Model/Metrics/ScrapeDefinitionBatchProperties.cs | 2 +- .../Serialization/v1/Model/MetricsDeclarationV1.cs | 1 
- src/Promitor.Core.Scraping/LogAnalyticsScraper.cs | 3 --- src/Promitor.Core/Extensions/MeasureMetricExtensions.cs | 1 + .../AzureMonitorQueryClient.cs | 3 +-- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 4 +--- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 1 - .../Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs | 8 ++++---- .../Batching/AzureResourceDefinitionBatchingTests.cs | 3 +-- 9 files changed, 9 insertions(+), 17 deletions(-) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs index b3f8868bf..8755e71cd 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs @@ -95,7 +95,7 @@ public string BuildBatchHashKey() /// public bool Equals(ScrapeDefinitionBatchProperties obj) { - if (obj is null || !(obj is ScrapeDefinitionBatchProperties)) + if (!(obj is ScrapeDefinitionBatchProperties)) return false; ScrapeDefinitionBatchProperties other = obj; diff --git a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs index 8f7dae4dd..4c4e80082 100644 --- a/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs +++ b/src/Promitor.Core.Scraping/Configuration/Serialization/v1/Model/MetricsDeclarationV1.cs @@ -1,5 +1,4 @@ using System.Collections.Generic; -using Promitor.Core.Scraping.Configuration.Model; using Promitor.Core.Serialization.Enum; namespace Promitor.Core.Scraping.Configuration.Serialization.v1.Model diff --git a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs index b0f117b5a..3f32d30a4 100644 --- a/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs 
+++ b/src/Promitor.Core.Scraping/LogAnalyticsScraper.cs @@ -1,10 +1,8 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Net; using System.Threading.Tasks; using GuardNet; -using Microsoft.VisualBasic; using Promitor.Core.Contracts; using Promitor.Core.Contracts.ResourceTypes; using Promitor.Core.Metrics; @@ -55,7 +53,6 @@ private Dictionary DetermineMetricLabels(LogAnalyticsResourceDef protected override async Task> BatchScrapeResourceAsync(string subscriptionId, BatchScrapeDefinition batchScrapeDefinition, PromitorMetricAggregationType aggregationType, TimeSpan aggregationInterval) { - // TODO: these just dispatch and await on tasks that do single-resource scraping. Implement integration with Log Analytics batch endpoint var logScrapingTasks = batchScrapeDefinition.ScrapeDefinitions.Select(definition => ScrapeResourceAsync(subscriptionId, definition, (LogAnalyticsResourceDefinition) definition.Resource, aggregationType, aggregationInterval)).ToList(); var resultsList = await Task.WhenAll(logScrapingTasks); diff --git a/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs b/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs index 34baeff60..a886ab27e 100644 --- a/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs +++ b/src/Promitor.Core/Extensions/MeasureMetricExtensions.cs @@ -4,6 +4,7 @@ namespace Promitor.Core.Extensions { public static class MeasuredMetricExtensions { + /// A time series value /// Resource ID to associate the metric with /// Instance of MeasuredMetric subclass with resourceId attached public static ResourceAssociatedMeasuredMetric WithResourceIdAssociation(this MeasuredMetric measuredMetric, string resourceId) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 1f8f34ea5..f39ef53a7 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ 
b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -23,7 +23,6 @@ using Azure.Core.Diagnostics; using System.Diagnostics.Tracing; using Promitor.Integrations.AzureMonitor.Extensions; -using System.Globalization; namespace Promitor.Integrations.AzureMonitor { @@ -302,7 +301,7 @@ private MetricsClient CreateAzureMonitorMetricsBatchClient(AzureCloud azureCloud public static string InsertRegionIntoUrl(string region, string baseUrl) { // Find the position where ".metrics" starts in the URL - int metricsIndex = baseUrl.IndexOf("metrics", System.StringComparison.Ordinal); + int metricsIndex = baseUrl.IndexOf("metrics", StringComparison.Ordinal); // Split the base URL into two parts: before and after the ".metrics" string beforeMetrics = baseUrl.Substring(0, metricsIndex); diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index aa8f2a8cd..817a4ef0b 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -1,11 +1,9 @@ -using Azure.Core; - namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { public bool Enabled { get; set; } = false; public int MaxBatchSize { get; set; } - public string AzureRegion { get; set; } = "eastus"; // Batch scrape endpoints are deployed by region + public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 9db3814c2..3acbc1981 100644 --- 
a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Globalization; -using System.Text; using System.Threading.Tasks; using Azure.Core; using Azure.Core.Pipeline; diff --git a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs index d9f061fce..c95cc4479 100644 --- a/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs +++ b/src/Promitor.Tests.Unit/Core/Metrics/ScrapeDefinitionBatchPropertiesTest.cs @@ -169,7 +169,7 @@ public void BuildBatchHashKeyDifferentResultDifferentSchedule() [Fact] public void BuildBatchHashKeyTest() { - AzureMetricConfigurationV1 _azureMetricConfigurationTest1 = new AzureMetricConfigurationV1 + AzureMetricConfigurationV1 azureMetricConfigurationTest1 = new AzureMetricConfigurationV1 { MetricName = "availabilityResults/availabilityPercentage", Aggregation = new MetricAggregationV1 @@ -177,7 +177,7 @@ public void BuildBatchHashKeyTest() Type = PromitorMetricAggregationType.Average }, }; - AzureMetricConfigurationV1 _azureMetricConfigurationTest2 = new AzureMetricConfigurationV1 + AzureMetricConfigurationV1 azureMetricConfigurationTest2 = new AzureMetricConfigurationV1 { MetricName = "availabilityResults/availabilityPercentage", Dimensions = [new MetricDimensionV1{Name = "availabilityResult/name"}], @@ -186,8 +186,8 @@ public void BuildBatchHashKeyTest() Type = PromitorMetricAggregationType.Average }, }; - var azureMetricConfiguration1 = _mapper.Map(_azureMetricConfigurationTest1); - var azureMetricConfiguration2 = _mapper.Map(_azureMetricConfigurationTest2); + var azureMetricConfiguration1 = _mapper.Map(azureMetricConfigurationTest1); + var azureMetricConfiguration2 = 
_mapper.Map(azureMetricConfigurationTest2); var scraping1 = _mapper.Map(scrapingBase); var scraping2 = _mapper.Map(scrapingBase); diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs index 68a674481..fa5409718 100644 --- a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs +++ b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.ComponentModel; -using System.Threading; using AutoMapper; using Promitor.Core.Contracts; using Promitor.Core.Metrics; @@ -12,7 +11,7 @@ using Promitor.Core.Scraping.Configuration.Serialization.v1.Model; using Xunit; -namespace Promitor.Tests.Unit.Core.Metrics +namespace Promitor.Tests.Unit.Core.Scraping.Batching { [Category("Unit")] public class AzureResourceDefinitionBatchingTests From 9073887468df399382f694f00f3dc68f87a8aaa1 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 18:45:25 -0700 Subject: [PATCH 096/131] add null check --- .../Batching/AzureResourceDefinitionBatching.cs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index d4baf5efc..b08639b58 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -1,6 +1,5 @@ using System.Collections.Generic; using System.Linq; -using System.Threading; using Promitor.Core.Contracts; using Promitor.Core.Scraping.Configuration.Model.Metrics; @@ -24,7 +23,7 @@ public static List> GroupScrapeD .ToList(); // flatten } - /// + /// /// splits the "raw" batch according to max batch size configured /// private static List>> 
SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize) From e1c96b1a7ae2afe901ea0c2d9d605828f435df2a Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 19:12:57 -0700 Subject: [PATCH 097/131] avoid naming collision --- .../AzureMonitorScraper.cs | 18 ++++--- .../AzureResourceDefinitionBatchingTests.cs | 52 +++++++++---------- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index eb5691a35..53c6c632e 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -106,7 +106,7 @@ protected override async Task> BatchScrapeResourceAsync(strin resourceName: scrapeDefinition.Resource.ResourceName ); Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, for {ResourceId}, of resource type {ResourceType}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceUri, resourceDefinitionToCache.ResourceType); - _resourceDefinitions.AddOrUpdate(resourceUri, new Tuple(resourceDefinitionToCache, (TResourceDefinition)scrapeDefinition.Resource), (uri, oldTuple) => oldTuple); + _resourceDefinitions.AddOrUpdate(resourceUri, new Tuple(resourceDefinitionToCache, (TResourceDefinition)scrapeDefinition.Resource), (newTuple, oldTuple) => oldTuple); } var metricLimit = batchScrapeDefinition.ScrapeDefinitionBatchProperties.AzureMetricConfiguration.Limit; @@ -139,13 +139,15 @@ protected override async Task> BatchScrapeResourceAsync(strin foreach (IGrouping resourceMetricsGroup in groupedMeasuredMetrics) { var resourceId = resourceMetricsGroup.Key; - _resourceDefinitions.TryGetValue(resourceId, out Tuple resourceDefinitionTuple); - var resourceDefinition = resourceDefinitionTuple.Item1; - var metricLabels = DetermineMetricLabels(resourceDefinitionTuple.Item2); - 
var finalMetricValues = EnrichMeasuredMetrics(resourceDefinitionTuple.Item2, dimensionNames, resourceMetricsGroup.ToImmutableList()); - Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId} of resource group {ResourceGroup}", finalMetricValues.Count, resourceId, resourceDefinition.ResourceGroupName); - scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); - Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); + if (_resourceDefinitions.TryGetValue(resourceId, out Tuple resourceDefinitionTuple)) + { + var resourceDefinition = resourceDefinitionTuple.Item1; + var metricLabels = DetermineMetricLabels(resourceDefinitionTuple.Item2); + var finalMetricValues = EnrichMeasuredMetrics(resourceDefinitionTuple.Item2, dimensionNames, resourceMetricsGroup.ToImmutableList()); + Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId} of resource group {ResourceGroup}", finalMetricValues.Count, resourceId, resourceDefinition.ResourceGroupName); + scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); + Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); + } } return scrapeResults; diff --git a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs index fa5409718..a23989993 100644 --- a/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs +++ 
b/src/Promitor.Tests.Unit/Core/Scraping/Batching/AzureResourceDefinitionBatchingTests.cs @@ -18,9 +18,9 @@ public class AzureResourceDefinitionBatchingTests { private readonly IMapper _mapper; // to model instantiation happen private readonly static string azureMetricNameBase = "promitor_batch_test_metric"; - private readonly static PrometheusMetricDefinition prometheusMetricDefinition = + private readonly static PrometheusMetricDefinition prometheusMetricDefinitionTest = new("promitor_batch_test", "test", new Dictionary()); - private readonly static string subscriptionId = "subscription"; + private readonly static string subscriptionIdTest = "subscription"; private readonly static AzureMetricConfigurationV1 azureMetricConfigurationBase = new AzureMetricConfigurationV1 { MetricName = azureMetricNameBase, @@ -41,7 +41,7 @@ public class AzureResourceDefinitionBatchingTests { Schedule = "5 4 3 2 1" }; - private readonly static string resourceGroupName = "batch_test_group"; + private readonly static string resourceGroupNameTest = "batch_test_group"; private readonly static int batchSize = 50; public AzureResourceDefinitionBatchingTests() @@ -57,8 +57,8 @@ public void IdenticalBatchPropertiesShouldBatchTogether() var scraping = _mapper.Map(scrapingBase); var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionIdTest, resourceGroupName: 
resourceGroupNameTest, 10 ); var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: batchSize); // expect one batch of 10 @@ -75,8 +75,8 @@ public void BatchShouldSplitAccordingToConfiguredBatchSize() var scraping = _mapper.Map(scrapingBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 25 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionIdTest, resourceGroupName: resourceGroupNameTest, 25 ); var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions(scrapeDefinitions, maxBatchSize: testBatchSize); // expect three batches adding up to total size @@ -91,12 +91,12 @@ public void DifferentBatchPropertiesShouldBatchSeparately() var scraping = _mapper.Map(scrapingBase); var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionIdTest, 
resourceGroupName: resourceGroupNameTest, 10 ); var differentScrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionIdTest, resourceGroupName: resourceGroupNameTest, 10 ); var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. differentScrapeDefinitions], maxBatchSize: batchSize); // expect two batch of 10 each @@ -114,15 +114,15 @@ public void DifferentAggregationIntervalsShouldBatchSeparately() azureMetricConfiguration5MInterval.Aggregation.Interval = TimeSpan.FromMinutes(2); var logAnalyticsConfiguration = _mapper.Map(logAnalyticsConfigurationBase); var scraping = _mapper.Map(scrapingBase); - var scrapeDefinitions5m = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration5MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 + var scrapeDefinitions5M = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration5MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionIdTest, resourceGroupName: resourceGroupNameTest, 10 ); - var differentScrapeDefinitions2m = 
BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration2MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 + var differentScrapeDefinitions2M = BuildScrapeDefinitionBatch( + azureMetricConfiguration: azureMetricConfiguration2MInterval, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionIdTest, resourceGroupName: resourceGroupNameTest, 10 ); - var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5m, .. differentScrapeDefinitions2m], maxBatchSize: batchSize); + var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions5M, .. 
differentScrapeDefinitions2M], maxBatchSize: batchSize); // expect two batch of 10 each Assert.Equal(2, groupedScrapeDefinitions.Count); Assert.Equal(10, groupedScrapeDefinitions[0].ScrapeDefinitions.Count); @@ -137,12 +137,12 @@ public void MixedBatchShouldSplitAccordingToConfiguredBatchSize() var scraping = _mapper.Map(scrapingBase); var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 130 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionIdTest, resourceGroupName: resourceGroupNameTest, 130 ); var differentScrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 120 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.BlobStorage, subscriptionId: subscriptionIdTest, resourceGroupName: resourceGroupNameTest, 120 ); var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. 
differentScrapeDefinitions], maxBatchSize: batchSize); // expect two batch of 10 each @@ -157,12 +157,12 @@ public void BatchConstructionShouldBeAgnosticToResourceGroup() var scraping = _mapper.Map(scrapingBase); var logAnalyticsConfiguration = new LogAnalyticsConfiguration(); var scrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: resourceGroupName, 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionIdTest, resourceGroupName: resourceGroupNameTest, 10 ); var differentScrapeDefinitions = BuildScrapeDefinitionBatch( - azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinition, scraping: scraping, - resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionId, resourceGroupName: "group2", 10 + azureMetricConfiguration: azureMetricConfiguration, logAnalyticsConfiguration: logAnalyticsConfiguration, prometheusMetricDefinition: prometheusMetricDefinitionTest, scraping: scraping, + resourceType: ResourceType.StorageAccount, subscriptionId: subscriptionIdTest, resourceGroupName: "group2", 10 ); var groupedScrapeDefinitions = AzureResourceDefinitionBatching.GroupScrapeDefinitions([.. scrapeDefinitions, .. 
differentScrapeDefinitions], maxBatchSize: batchSize); // expect two batch of 10 each From 172cd3331af8ebca221c18f63ab574a0ca27ea5a Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 20:16:04 -0700 Subject: [PATCH 098/131] fix fix style --- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 6 +++--- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 8 +++++--- .../Collectors/PrometheusSystemMetricsSink.cs | 2 ++ 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index 817a4ef0b..e85095b58 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -2,8 +2,8 @@ namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { - public bool Enabled { get; set; } = false; - public int MaxBatchSize { get; set; } - public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region + public bool Enabled { get; } = false; + public int MaxBatchSize { get; } + public string AzureRegion { get; } // Batch scrape endpoints are deployed by region } } \ No newline at end of file diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 3acbc1981..7f56d9a26 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -9,7 +9,7 @@ namespace Promitor.Integrations.AzureMonitor.HttpPipelinePolicies{ /// - /// Work around to make 
sure range queries work properly. + /// Work around to make sure range queries work properly. /// public class ModifyOutgoingAzureMonitorRequestsPolicy : HttpPipelinePolicy { @@ -23,7 +23,6 @@ public ModifyOutgoingAzureMonitorRequestsPolicy(ILogger logger) public override async ValueTask ProcessAsync(HttpMessage message, ReadOnlyMemory pipeline) { ModifyDateTimeParam(["starttime", "endtime"], message); - _logger.LogWarning("Modified URI: {uri}", message.Request.Uri.ToString()); await ProcessNextAsync(message, pipeline); } @@ -49,7 +48,10 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) // Update the message with the modified URI } } - message.Request.Uri.Query = query.ToString(); + if (message?.Request?.Uri != null && query != null) + { + message.Request.Uri.Query = query.ToString(); + } } } } diff --git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs index a3c260cb2..18d589ec9 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs @@ -28,6 +28,7 @@ public PrometheusSystemMetricsSink(IMetricFactory metricFactory) /// Indication whether or not a timestamp should be reported public Task WriteGaugeMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) { + Guard.NotNull(labels, nameof(labels)); // Order labels alphabetically var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); @@ -47,6 +48,7 @@ public Task WriteGaugeMeasurementAsync(string name, string description, double v /// Indication whether or not a timestamp should be reported public Task WriteHistogramMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) { + 
Guard.NotNull(labels, nameof(labels)); var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); var histogram = _metricFactory.CreateHistogram(name, help: description, includeTimestamp: includeTimestamp, labelNames: orderedLabels.Keys.ToArray(), buckets: [1, 2, 4, 8, 16, 32, 64]); From 299ec6ec9c7e834d577222e3943ec21079360a76 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 20:20:44 -0700 Subject: [PATCH 099/131] make config settable --- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index e85095b58..817a4ef0b 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -2,8 +2,8 @@ namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { - public bool Enabled { get; } = false; - public int MaxBatchSize { get; } - public string AzureRegion { get; } // Batch scrape endpoints are deployed by region + public bool Enabled { get; set; } = false; + public int MaxBatchSize { get; set; } + public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region } } \ No newline at end of file From b2d3458efcfa37d3a8fbf82256eb6852c1aaffcb Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 20:31:59 -0700 Subject: [PATCH 100/131] get rid of some redundant code --- .../Model/Metrics/ScrapeDefinitionBatchProperties.cs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs 
b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs index 8755e71cd..05c31f2eb 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs @@ -93,12 +93,8 @@ public string BuildBatchHashKey() /// /// Equality comparison override in case of hash collision /// - public bool Equals(ScrapeDefinitionBatchProperties obj) + public bool Equals(ScrapeDefinitionBatchProperties other) { - if (!(obj is ScrapeDefinitionBatchProperties)) - return false; - - ScrapeDefinitionBatchProperties other = obj; return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && GetAggregationInterval().Equals(other.GetAggregationInterval()); } } From 47672d6c06ff5c8aca273c22921c04c56b28470e Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 22:59:09 -0700 Subject: [PATCH 101/131] Fighting resharper --- .../AzureMonitorMetricBatchScrapeConfig.cs | 5 ++++- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 14 +++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index 817a4ef0b..4cf9f39a3 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -1,8 +1,11 @@ +using System.Diagnostics.CodeAnalysis; + namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { - public bool Enabled { get; set; } = false; + [SuppressMessage("Style", "IDE0044:Add readonly modifier", 
Justification = "Explicitly init the false value for better readability")] + public bool Enabled { get; set; } = false; public int MaxBatchSize { get; set; } public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region } diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 7f56d9a26..027bab749 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -36,21 +36,21 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) // Modify the request URL by updating or adding a query parameter var uriBuilder = new UriBuilder(message.Request.Uri.ToString()); var query = System.Web.HttpUtility.ParseQueryString(uriBuilder.Query); + bool queryModified = false; + foreach (var param in paramNames) { - _logger.LogWarning("Original URI param {param} is {value}", param, query[param]); - if (DateTimeOffset.TryParseExact(query[param], "MM/dd/yyyy HH:mm:ss zzz", CultureInfo.InvariantCulture, DateTimeStyles.None, out DateTimeOffset dateTime)) { // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") query[param] = dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ"); - _logger.LogWarning("Modified URI param {param} to be {value}", param, query[param]); - // Update the message with the modified URI + queryModified = true; } } - if (message?.Request?.Uri != null && query != null) - { - message.Request.Uri.Query = query.ToString(); + if (queryModified) { + message.Request.Uri.Query = uriBuilder.Query; + } else { + _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); } } } From 06e404dc9fa620f6eccf2d6c5b6640d6ef6adc28 Mon Sep 17 00:00:00 2001 From: xchen Date: 
Wed, 25 Sep 2024 23:05:50 -0700 Subject: [PATCH 102/131] Fighting resharper --- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index 4cf9f39a3..ed5317353 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -4,9 +4,9 @@ namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { - [SuppressMessage("Style", "IDE0044:Add readonly modifier", Justification = "Explicitly init the false value for better readability")] + [System.Diagnostics.CodeAnalysis.SuppressMessage("CodeQuality", "IDE0051:Remove unused private members", Justification = "Explicit initialization to false for better readability")] public bool Enabled { get; set; } = false; public int MaxBatchSize { get; set; } public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region } -} \ No newline at end of file +} From af638b0745163cbb9ca0570661867c3cde22daf0 Mon Sep 17 00:00:00 2001 From: xchen Date: Wed, 25 Sep 2024 23:16:51 -0700 Subject: [PATCH 103/131] add null check --- .../Model/Metrics/ScrapeDefinitionBatchProperties.cs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs index 05c31f2eb..d990d7f93 100644 --- a/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs +++ b/src/Promitor.Core.Scraping/Configuration/Model/Metrics/ScrapeDefinitionBatchProperties.cs @@ -95,6 +95,10 @@ public string 
BuildBatchHashKey() /// public bool Equals(ScrapeDefinitionBatchProperties other) { + if (other is null) { + return false; + } + return ResourceType == other.ResourceType && AzureMetricConfiguration.ToUniqueStringRepresentation() == other.AzureMetricConfiguration.ToUniqueStringRepresentation() && SubscriptionId == other.SubscriptionId && GetAggregationInterval().Equals(other.GetAggregationInterval()); } } From 457330098113d15ee1812bb452db10fc4a161ee8 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 00:11:55 -0700 Subject: [PATCH 104/131] add null check --- .../Configuration/AzureMonitorMetricBatchScrapeConfig.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs index ed5317353..bf7f36bba 100644 --- a/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs +++ b/src/Promitor.Integrations.AzureMonitor/Configuration/AzureMonitorMetricBatchScrapeConfig.cs @@ -4,7 +4,7 @@ namespace Promitor.Integrations.AzureMonitor.Configuration { public class AzureMonitorMetricBatchScrapeConfig { - [System.Diagnostics.CodeAnalysis.SuppressMessage("CodeQuality", "IDE0051:Remove unused private members", Justification = "Explicit initialization to false for better readability")] + [SuppressMessage("ReSharper", "RedundantDefaultMemberInitializer", Justification = "Explicit initialization to false for better readability")] public bool Enabled { get; set; } = false; public int MaxBatchSize { get; set; } public string AzureRegion { get; set; } // Batch scrape endpoints are deployed by region From 9f258041e1d98a403c9668221aa7a50a69685b18 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 00:28:56 -0700 Subject: [PATCH 105/131] add simple unit test --- .../Collectors/PrometheusSystemMetricsSink.cs | 1 + 1 file changed, 1 insertion(+) diff 
--git a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs index 18d589ec9..239e3330f 100644 --- a/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs +++ b/src/Promitor.Integrations.Sinks.Prometheus/Collectors/PrometheusSystemMetricsSink.cs @@ -29,6 +29,7 @@ public PrometheusSystemMetricsSink(IMetricFactory metricFactory) public Task WriteGaugeMeasurementAsync(string name, string description, double value, Dictionary labels, bool includeTimestamp) { Guard.NotNull(labels, nameof(labels)); + // Order labels alphabetically var orderedLabels = labels.OrderByDescending(kvp => kvp.Key).ToDictionary(kvp => kvp.Key, kvp => kvp.Value); From 943e4372722510af0397cd534bc024489e5c16cc Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 00:33:35 -0700 Subject: [PATCH 106/131] Revert CI workflow changes --- .github/workflows/templates-build-push-image.yml | 9 +++------ config/opentelemetry-collector/collector-config.yaml | 3 +-- config/promitor/scraper/ci-runtime.yaml | 5 ----- 3 files changed, 4 insertions(+), 13 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index 16f5d8440..fc7a2afc6 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -1,6 +1,5 @@ -name: Build and Push (Linux) on: - workflow_dispatch: + workflow_call: inputs: image_name: required: true @@ -16,8 +15,6 @@ jobs: linux: name: Build & Push (Linux) runs-on: ubuntu-latest - permissions: - packages: write steps: - name: Checkout Code uses: actions/checkout@v4 @@ -43,7 +40,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: hkfgo + username: tomkerkhove password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -52,5 +49,5 @@ jobs: build-args: VERSION="${{ 
env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ghcr.io/hkfgo/${{ env.image_commit_uri }},ghcr.io/hkfgo/${{ env.image_latest_uri }} + tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} push: true \ No newline at end of file diff --git a/config/opentelemetry-collector/collector-config.yaml b/config/opentelemetry-collector/collector-config.yaml index 3ce975037..5efc22f2a 100644 --- a/config/opentelemetry-collector/collector-config.yaml +++ b/config/opentelemetry-collector/collector-config.yaml @@ -2,8 +2,7 @@ receivers: otlp: protocols: grpc: - tls: - insecure: true + exporters: prometheus: endpoint: "0.0.0.0:8889" diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index c51ecf74b..b8eefbcd5 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -15,11 +15,6 @@ telemetry: verbosity: trace defaultVerbosity: trace azureMonitor: - integration: - metricsBatching: - enabled: true - maxBatchSize: 2 - azureRegion: westeurope logging: isEnabled: false resourceDiscovery: From 88f43cc8febceaed7f4a95abd3b76fb7fc502a3a Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 00:37:22 -0700 Subject: [PATCH 107/131] get rid of excessive logging --- .../Scheduling/ResourcesScrapingJob.cs | 7 +----- .../AzureMonitorScraper.cs | 8 ------- .../MeasureMetricExtensionsTests.cs | 22 +++++++++++++++++++ 3 files changed, 23 insertions(+), 14 deletions(-) create mode 100644 src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs diff --git a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs index f01c2b2cf..59c3e7abc 100644 --- a/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs +++ b/src/Promitor.Agents.Scraper/Scheduling/ResourcesScrapingJob.cs @@ -256,7 +256,7 @@ private async Task ScrapeMetrics(IEnumerable definition 
in batchScrapeDefinition.ScrapeDefinitions) - { - Logger.LogInformation("ResourceID: {ResourceID}, ResourceGroup: {ResourceGroup}, Prometheus metric name: {MetricName}, Batch Key: {BatchKey}", definition.Resource.ResourceName, definition.ResourceGroupName, definition.PrometheusMetricDefinition.Name, definition.BuildScrapingBatchInfo().BuildBatchHashKey()); - } await ScheduleLimitedConcurrencyAsyncTask(tasks, () => ScrapeMetricBatched(batchScrapeDefinition), cancellationToken); } } else { diff --git a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs index 53c6c632e..c9d2ce9d2 100644 --- a/src/Promitor.Core.Scraping/AzureMonitorScraper.cs +++ b/src/Promitor.Core.Scraping/AzureMonitorScraper.cs @@ -105,7 +105,6 @@ protected override async Task> BatchScrapeResourceAsync(strin subscriptionId: scrapeDefinition.SubscriptionId, resourceName: scrapeDefinition.Resource.ResourceName ); - Logger.LogWarning("Caching resource group {Group}, resource name {ResourceName}, subscription ID {SubscriptionID}, for {ResourceId}, of resource type {ResourceType}", resourceDefinitionToCache.ResourceGroupName, resourceDefinitionToCache.ResourceName, resourceDefinitionToCache.SubscriptionId, resourceUri, resourceDefinitionToCache.ResourceType); _resourceDefinitions.AddOrUpdate(resourceUri, new Tuple(resourceDefinitionToCache, (TResourceDefinition)scrapeDefinition.Resource), (newTuple, oldTuple) => oldTuple); } @@ -116,12 +115,7 @@ protected override async Task> BatchScrapeResourceAsync(strin try { // Query Azure Monitor for metrics - Logger.LogWarning("Querying Azure Monitor for metric {MetricName} with batch size {BatchSize}", metricName, resourceUriList.Count); resourceIdTaggedMeasuredMetrics = await AzureMonitorClient.BatchQueryMetricAsync(metricName, dimensionNames, aggregationType, aggregationInterval, resourceUriList, null, metricLimit); - foreach (var resourceMetric in resourceIdTaggedMeasuredMetrics) - { - 
Logger.LogWarning("Discovered value {Value} for metric {Metric} and resource ID {ResourceID}", resourceMetric.Value, metricName, resourceMetric.ResourceId); - } } catch (MetricInformationNotFoundException metricsNotFoundException) { @@ -144,9 +138,7 @@ protected override async Task> BatchScrapeResourceAsync(strin var resourceDefinition = resourceDefinitionTuple.Item1; var metricLabels = DetermineMetricLabels(resourceDefinitionTuple.Item2); var finalMetricValues = EnrichMeasuredMetrics(resourceDefinitionTuple.Item2, dimensionNames, resourceMetricsGroup.ToImmutableList()); - Logger.LogWarning("Processing {MetricsCount} measured metrics for resourceID {ResourceId} of resource group {ResourceGroup}", finalMetricValues.Count, resourceId, resourceDefinition.ResourceGroupName); scrapeResults.Add(new ScrapeResult(subscriptionId, resourceDefinition.ResourceGroupName, resourceDefinition.ResourceName, resourceId, finalMetricValues, metricLabels)); - Logger.LogWarning("Processed {MetricsCount} measured metrics for Metric {MetricName} and resource {ResourceName}", finalMetricValues.Count, metricName, resourceId); } } diff --git a/src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs b/src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs new file mode 100644 index 000000000..604d0fd1e --- /dev/null +++ b/src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs @@ -0,0 +1,22 @@ +using System.Collections.Generic; +using System.ComponentModel; +using Azure.Monitor.Query.Models; +using Promitor.Core.Extensions; +using Promitor.Core.Metrics; +using Xunit; + +namespace Promitor.Tests.Unit.Core.Extensions +{ + [Category("Unit")] + public class MeasureMetricExtensionsTests + { + [Fact] + public void AssociateWithResourceId() + { + var measuredMetricUnassociated = MeasuredMetric.CreateWithoutDimensions(1); + var resourceId = "/subscriptions/abc/providers/def/test"; + var measuredMetricAssociated = 
measuredMetricUnassociated.WithResourceIdAssociation(resourceId); + Assert.Equal(resourceId, measuredMetricAssociated.ResourceId); + } + } +} \ No newline at end of file From c790b5414ec470d70f3187cc091dea0b9b37e4c0 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 00:46:42 -0700 Subject: [PATCH 108/131] add a link to docs --- .../Batching/AzureResourceDefinitionBatching.cs | 1 + .../Core/Extensions/MeasureMetricExtensionsTests.cs | 2 -- .../Generators/Config/RuntimeConfigurationGenerator.cs | 5 ----- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index b08639b58..6cbf0f502 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -13,6 +13,7 @@ public static class AzureResourceDefinitionBatching /// 2. Definitions in a batch must target the same Azure metric with identical dimensions /// 3. Definitions in a batch must have the same time granularity /// 4. 
Batch size cannot exceed configured maximum + /// /// public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize) { diff --git a/src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs b/src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs index 604d0fd1e..65d19d61b 100644 --- a/src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs +++ b/src/Promitor.Tests.Unit/Core/Extensions/MeasureMetricExtensionsTests.cs @@ -1,6 +1,4 @@ -using System.Collections.Generic; using System.ComponentModel; -using Azure.Monitor.Query.Models; using Promitor.Core.Extensions; using Promitor.Core.Metrics; using Xunit; diff --git a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs index a7ef73637..245e24045 100644 --- a/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs +++ b/src/Promitor.Tests.Unit/Generators/Config/RuntimeConfigurationGenerator.cs @@ -349,11 +349,6 @@ public async Task GenerateAsync() if (_runtimeConfiguration?.AzureMonitor.Integration?.History != null) { - // configurationBuilder.AppendLine(" integration:"); - - // configurationBuilder.AppendLine($" useAzureMonitorSdk: {_runtimeConfiguration?.AzureMonitor.Integration.UseAzureMonitorSdk}"); - // configurationBuilder.AppendLine(" history:"); - // configurationBuilder.AppendLine($" startingFromInHours: {_runtimeConfiguration?.AzureMonitor.Integration.History.StartingFromInHours}"); configurationBuilder.AppendLine(" integration:"); configurationBuilder.AppendLine($" useAzureMonitorSdk: {_runtimeConfiguration?.AzureMonitor.Integration.UseAzureMonitorSdk}"); configurationBuilder.AppendLine(" history:"); From ce56dbbb8b804f1c1587ece20419b3e92b6e7e9c Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 00:50:25 -0700 Subject: [PATCH 109/131] close ref --- .../Batching/AzureResourceDefinitionBatching.cs | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index 6cbf0f502..d98f297f4 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -13,7 +13,7 @@ public static class AzureResourceDefinitionBatching /// 2. Definitions in a batch must target the same Azure metric with identical dimensions /// 3. Definitions in a batch must have the same time granularity /// 4. Batch size cannot exceed configured maximum - /// + /// /// public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize) { From 04ff115df866c7c07f0aafbab43338f01fbd7b8d Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 00:58:43 -0700 Subject: [PATCH 110/131] do not initialize batch client under single-resource scraping --- .../AzureMonitorQueryClient.cs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index f39ef53a7..d4178ce83 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -61,7 +61,10 @@ public AzureMonitorQueryClient(AzureCloud azureCloud, string tenantId, string su _azureMonitorIntegrationConfiguration = azureMonitorIntegrationConfiguration; _logger = loggerFactory.CreateLogger(); _metricsQueryClient = CreateAzureMonitorMetricsClient(azureCloud, tenantId, subscriptionId, azureAuthenticationInfo, metricSinkWriter, azureScrapingSystemMetricsPublisher, azureMonitorLoggingConfiguration); - _metricsBatchQueryClient = CreateAzureMonitorMetricsBatchClient(azureCloud, tenantId, azureAuthenticationInfo, azureMonitorIntegrationConfiguration, 
azureMonitorLoggingConfiguration); + if (_azureMonitorIntegrationConfiguration.Value.MetricsBatching.Enabled) + { + _metricsBatchQueryClient = CreateAzureMonitorMetricsBatchClient(azureCloud, tenantId, azureAuthenticationInfo, azureMonitorIntegrationConfiguration, azureMonitorLoggingConfiguration); + } } /// @@ -109,6 +112,7 @@ public async Task> BatchQueryMetricAsync( { Guard.NotNullOrWhitespace(metricName, nameof(metricName)); Guard.NotLessThan(resourceIds.Count(), 1, nameof(resourceIds)); + Guard.NotNull(_metricsBatchQueryClient, nameof(_metricsBatchQueryClient)); // Get all metrics var startQueryingTime = DateTime.UtcNow; From 31669c5f756b8a5f3cd435a715fea145d4c2e734 Mon Sep 17 00:00:00 2001 From: xchen Date: Thu, 26 Sep 2024 01:20:01 -0700 Subject: [PATCH 111/131] handle windows time --- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 027bab749..278af2407 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -40,12 +40,12 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) foreach (var param in paramNames) { - if (DateTimeOffset.TryParseExact(query[param], "MM/dd/yyyy HH:mm:ss zzz", CultureInfo.InvariantCulture, DateTimeStyles.None, out DateTimeOffset dateTime)) + if (DateTimeOffset.TryParseExact(query[param], ["MM/dd/yyyy HH:mm:ss zzz", "M/d/yyyy h:mm:ss tt zzz"], CultureInfo.InvariantCulture, DateTimeStyles.None, out DateTimeOffset dateTime)) { // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") query[param] = dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ"); queryModified 
= true; - } + } } if (queryModified) { message.Request.Uri.Query = uriBuilder.Query; From b82b73d35288a36f25874249858c5e8c14bce12f Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 14:59:15 -0700 Subject: [PATCH 112/131] (temporary) change GitHub action to build another Linux image --- .github/workflows/templates-build-push-image.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index fc7a2afc6..23e9403d7 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -1,5 +1,6 @@ +name: Build and Push (Linux) on: - workflow_call: + workflow_dispatch: inputs: image_name: required: true @@ -15,6 +16,8 @@ jobs: linux: name: Build & Push (Linux) runs-on: ubuntu-latest + permissions: + packages: write steps: - name: Checkout Code uses: actions/checkout@v4 @@ -22,15 +25,12 @@ jobs: - name: "Determine preview artifact version" run: | echo "artifact_full_version=${{ inputs.version }}-${{ github.sha }}" >> $GITHUB_ENV - - name: "Determine full container image uri (commit)" run: | echo "image_commit_uri=${{ inputs.image_name }}:$artifact_full_version" >> $GITHUB_ENV - - name: "Determine full container image uri (latest)" run: | echo "image_latest_uri=${{ inputs.image_name }}:${{ inputs.version }}" >> $GITHUB_ENV - - name: Determine container image metadata uses: docker/metadata-action@v5.5.1 with: @@ -40,7 +40,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: tomkerkhove + username: hkfgo password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -49,5 +49,5 @@ jobs: build-args: VERSION="${{ env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} + tags: ghcr.io/hkfgo/${{ env.image_commit_uri }},ghcr.io/hkfgo/${{ 
env.image_latest_uri }} push: true \ No newline at end of file From c4b1041a0e05b75ff515be2d2c5d7ec4281b719f Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 18:13:17 -0700 Subject: [PATCH 113/131] revert back GitHub Action changes --- .../AzureMonitorQueryClient.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index d4178ce83..5e47b5da0 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -113,7 +113,7 @@ public async Task> BatchQueryMetricAsync( Guard.NotNullOrWhitespace(metricName, nameof(metricName)); Guard.NotLessThan(resourceIds.Count(), 1, nameof(resourceIds)); Guard.NotNull(_metricsBatchQueryClient, nameof(_metricsBatchQueryClient)); - + // Get all metrics var startQueryingTime = DateTime.UtcNow; var metricNamespaces = await _metricsQueryClient.GetAndCacheMetricNamespacesAsync(resourceIds.First(), _resourceMetricDefinitionMemoryCache, _metricDefinitionCacheDuration); From fa2fc3f77bf8e33e8ce1f21c7eb5bd85187389be Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 20:43:35 -0700 Subject: [PATCH 114/131] ci changes --- .github/workflows/templates-build-push-image.yml | 12 ++++++------ config/promitor/scraper/ci-runtime.yaml | 5 +++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index 23e9403d7..fc7a2afc6 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -1,6 +1,5 @@ -name: Build and Push (Linux) on: - workflow_dispatch: + workflow_call: inputs: image_name: required: true @@ -16,8 +15,6 @@ jobs: linux: name: Build & Push (Linux) runs-on: ubuntu-latest - permissions: - packages: write steps: - name: Checkout
Code uses: actions/checkout@v4 @@ -25,12 +22,15 @@ jobs: - name: "Determine preview artifact version" run: | echo "artifact_full_version=${{ inputs.version }}-${{ github.sha }}" >> $GITHUB_ENV + - name: "Determine full container image uri (commit)" run: | echo "image_commit_uri=${{ inputs.image_name }}:$artifact_full_version" >> $GITHUB_ENV + - name: "Determine full container image uri (latest)" run: | echo "image_latest_uri=${{ inputs.image_name }}:${{ inputs.version }}" >> $GITHUB_ENV + - name: Determine container image metadata uses: docker/metadata-action@v5.5.1 with: @@ -40,7 +40,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: hkfgo + username: tomkerkhove password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -49,5 +49,5 @@ jobs: build-args: VERSION="${{ env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ghcr.io/hkfgo/${{ env.image_commit_uri }},ghcr.io/hkfgo/${{ env.image_latest_uri }} + tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} push: true \ No newline at end of file diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index b8eefbcd5..c51ecf74b 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -15,6 +15,11 @@ telemetry: verbosity: trace defaultVerbosity: trace azureMonitor: + integration: + metricsBatching: + enabled: true + maxBatchSize: 2 + azureRegion: westeurope logging: isEnabled: false resourceDiscovery: From 216812b1175fc7a101d8ee137eb709a0f3e62276 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 20:57:25 -0700 Subject: [PATCH 115/131] add logging --- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs 
b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 278af2407..96b1a0f09 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -46,12 +46,14 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) query[param] = dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ"); queryModified = true; } + + if (queryModified) { + message.Request.Uri.Query = uriBuilder.Query; + } else { + _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); + } } - if (queryModified) { - message.Request.Uri.Query = uriBuilder.Query; - } else { - _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); - } + } } } From c7a4a6a8f8ed3e3957e2e37bada59180fe5bf939 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 21:24:04 -0700 Subject: [PATCH 116/131] add logging --- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 96b1a0f09..b7cadfe2e 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -45,13 +45,13 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") query[param] = dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ"); queryModified = true; + _logger.LogWarning("Modified {param} to modify parameters to be value {Value}", param, 
dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ")); } - - if (queryModified) { - message.Request.Uri.Query = uriBuilder.Query; - } else { - _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); - } + } + if (queryModified) { + message.Request.Uri.Query = uriBuilder.Query; + } else { + _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); } } From e27d58102a5755d0a1f95190894ffe9999dcdd3c Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 21:34:13 -0700 Subject: [PATCH 117/131] add logging --- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index b7cadfe2e..6bc82d0d3 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -53,6 +53,7 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) } else { _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); } + _logger.LogWarning("Final url is {URI}", message.Request.Uri.ToString()); } } From f1a2c4889b89413214b0670380f261c644c49020 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 21:46:00 -0700 Subject: [PATCH 118/131] add logging --- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index 6bc82d0d3..aa236a8b3 100644 --- 
a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -49,7 +49,7 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) } } if (queryModified) { - message.Request.Uri.Query = uriBuilder.Query; + message.Request.Uri.Query = query.ToString(); } else { _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); } From c72bda1f255fdbec1d5ab6ed66b12aa44f240b5b Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 21:52:00 -0700 Subject: [PATCH 119/131] disable ReSharper when modifying outgoing URL --- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index aa236a8b3..d5813d627 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -31,6 +31,7 @@ public override void Process(HttpMessage message, ReadOnlyMemory paramNames, HttpMessage message) { // Modify the request URL by updating or adding a query parameter From 206e155ea4985b58265601960ec93bbdb972b351 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 23:01:01 -0700 Subject: [PATCH 120/131] remove some more logging + add null checks --- .../Batching/AzureResourceDefinitionBatching.cs | 9 ++++++++- .../AzureMonitorQueryClient.cs | 1 - .../Extensions/AzureMonitorQueryTasks.cs | 1 - .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 4 +--- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs 
b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index d98f297f4..47ad758bf 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -1,5 +1,6 @@ using System.Collections.Generic; using System.Linq; +using GuardNet; using Promitor.Core.Contracts; using Promitor.Core.Scraping.Configuration.Model.Metrics; @@ -17,6 +18,9 @@ public static class AzureResourceDefinitionBatching /// public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize) { + Guard.NotNull(allScrapeDefinitions, nameof(allScrapeDefinitions)); + Guard.NotLessThan(allScrapeDefinitions.Count(), 1, nameof(allScrapeDefinitions)); + return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo()) .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max .ToDictionary(group => group.Key, group => SplitScrapeDefinitionBatch(group.Value, maxBatchSize)) // split to right-sized batches @@ -24,11 +28,14 @@ public static List> GroupScrapeD .ToList(); // flatten } - /// + /// /// splits the "raw" batch according to max batch size configured /// private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize) { + Guard.NotNull(batchToSplit, nameof(batchToSplit)); + Guard.NotLessThan(batchToSplit.Count(), 1, nameof(batchToSplit)); + int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1; return Enumerable.Range(0, numNewGroups) diff --git a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs index 5e47b5da0..68253287a 100644 --- a/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs +++ b/src/Promitor.Integrations.AzureMonitor/AzureMonitorQueryClient.cs @@ -133,7 +133,6 @@ public async Task> BatchQueryMetricAsync( // Get the most recent metric var metricResultsList = 
await _metricsBatchQueryClient.GetRelevantMetricForResources(resourceIds, metricName, metricNamespace, MetricAggregationTypeConverter.AsMetricAggregationType(aggregationType), closestAggregationInterval, metricFilter, metricDimensions, metricLimit, startQueryingTime, _azureMonitorIntegrationConfiguration, _logger); - _logger.LogWarning("Azure monitor has returned {ResultsCount} results for metric {MetricName}", metricResultsList.Count, metricName); //TODO: This is potentially a lot of results to process in a single thread. Think of ways to utilize additional parallelism return metricResultsList diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 930253f26..26b89c7fa 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -84,7 +84,6 @@ public static async Task> GetRelevantMetricForResources(this var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; - logger.LogWarning("Got response"); return metricsQueryResults.Values .Select(result => GetRelevantMetricResultOrThrow(result, metricName)) .ToList(); diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index d5813d627..b9f578fb2 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -53,9 +53,7 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) message.Request.Uri.Query = 
query.ToString(); } else { _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); - } - _logger.LogWarning("Final url is {URI}", message.Request.Uri.ToString()); - + } } } } From 67bf0a5e3641b06ccc1b5f5c622e0a020d0b9bf2 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 23:11:09 -0700 Subject: [PATCH 121/131] remove some more logging + add null checks --- .../Batching/AzureResourceDefinitionBatching.cs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index 47ad758bf..b029bb750 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -19,7 +19,6 @@ public static class AzureResourceDefinitionBatching public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize) { Guard.NotNull(allScrapeDefinitions, nameof(allScrapeDefinitions)); - Guard.NotLessThan(allScrapeDefinitions.Count(), 1, nameof(allScrapeDefinitions)); return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo()) .ToDictionary(group => group.Key, group => group.ToList()) // first pass to build batches that could exceed max @@ -34,7 +33,6 @@ public static List> GroupScrapeD private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize) { Guard.NotNull(batchToSplit, nameof(batchToSplit)); - Guard.NotLessThan(batchToSplit.Count(), 1, nameof(batchToSplit)); int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1; From f04cb8743957f26075fada4da5e0d50894f98c03 Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 23:41:29 -0700 Subject: [PATCH 122/131] remove some more logging + add null checks --- .../Batching/AzureResourceDefinitionBatching.cs | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index b029bb750..9126227ff 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -18,6 +18,7 @@ public static class AzureResourceDefinitionBatching /// public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize) { + // ReSharper disable once PossibleMultipleEnumeration Guard.NotNull(allScrapeDefinitions, nameof(allScrapeDefinitions)); return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo()) @@ -32,6 +33,7 @@ public static List> GroupScrapeD /// private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize) { + // ReSharper disable once PossibleMultipleEnumeration Guard.NotNull(batchToSplit, nameof(batchToSplit)); int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1; From 377bdfd5979862f5461c4e7cfe171db10a5e76ba Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 23:48:20 -0700 Subject: [PATCH 123/131] remove some more logging + add null checks --- .../Batching/AzureResourceDefinitionBatching.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs index 9126227ff..c5d5eec6e 100644 --- a/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs +++ b/src/Promitor.Core.Scraping/Batching/AzureResourceDefinitionBatching.cs @@ -18,7 +18,7 @@ public static class AzureResourceDefinitionBatching /// public static List> GroupScrapeDefinitions(IEnumerable> allScrapeDefinitions, int maxBatchSize) { - // ReSharper disable once PossibleMultipleEnumeration + // ReSharper disable PossibleMultipleEnumeration Guard.NotNull(allScrapeDefinitions, 
nameof(allScrapeDefinitions)); return allScrapeDefinitions.GroupBy(def => def.BuildScrapingBatchInfo()) @@ -33,7 +33,7 @@ public static List> GroupScrapeD /// private static List>> SplitScrapeDefinitionBatch(List> batchToSplit, int maxBatchSize) { - // ReSharper disable once PossibleMultipleEnumeration + // ReSharper disable PossibleMultipleEnumeration Guard.NotNull(batchToSplit, nameof(batchToSplit)); int numNewGroups = ((batchToSplit.Count - 1) / maxBatchSize) + 1; From 431f37d2c3cdf3ead8b5cb8a5f6fb74f045b905e Mon Sep 17 00:00:00 2001 From: xchen Date: Mon, 30 Sep 2024 23:55:07 -0700 Subject: [PATCH 124/131] remove some more logging + add null checks --- .../ModifyOutgoingAzureMonitorRequestsPolicy.cs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs index b9f578fb2..4e89a4f64 100644 --- a/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs +++ b/src/Promitor.Integrations.AzureMonitor/HttpPipelinePolicies/ModifyOutgoingAzureMonitorRequestsPolicy.cs @@ -46,12 +46,14 @@ private void ModifyDateTimeParam(List paramNames, HttpMessage message) // Transform to ISO 8601 format (e.g., "2024-09-09T20:46:14") query[param] = dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ"); queryModified = true; - _logger.LogWarning("Modified {param} to modify parameters to be value {Value}", param, dateTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ")); } } - if (queryModified) { + if (queryModified) + { message.Request.Uri.Query = query.ToString(); - } else { + } + else + { _logger.LogWarning("Failed to modify parameters {Parms}", string.Join("and ", paramNames)); } } From 86b4ddd1f1fd732fadc469892322c4be78236f79 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 1 Oct 2024 00:00:58 -0700 Subject: [PATCH 125/131] 
do not run batch mode in CI --- config/promitor/scraper/ci-runtime.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/config/promitor/scraper/ci-runtime.yaml b/config/promitor/scraper/ci-runtime.yaml index c51ecf74b..b8eefbcd5 100644 --- a/config/promitor/scraper/ci-runtime.yaml +++ b/config/promitor/scraper/ci-runtime.yaml @@ -15,11 +15,6 @@ telemetry: verbosity: trace defaultVerbosity: trace azureMonitor: - integration: - metricsBatching: - enabled: true - maxBatchSize: 2 - azureRegion: westeurope logging: isEnabled: false resourceDiscovery: From eb6d6766fa2aca8d0978944c3000d8e844c6530e Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 1 Oct 2024 11:10:20 -0700 Subject: [PATCH 126/131] trigger CI again --- .github/workflows/templates-build-push-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index fc7a2afc6..60a8380f8 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -50,4 +50,4 @@ jobs: context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} - push: true \ No newline at end of file + push: true From f2e0087f71da7071703454733e6c0570866169f7 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 1 Oct 2024 11:13:50 -0700 Subject: [PATCH 127/131] Modify workflow file again --- .github/workflows/templates-build-push-image.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index 60a8380f8..23e9403d7 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -1,5 +1,6 @@ +name: Build and Push (Linux) on: - workflow_call: + workflow_dispatch: inputs: image_name: required: true @@ -15,6 +16,8 @@ jobs: linux: 
name: Build & Push (Linux) runs-on: ubuntu-latest + permissions: + packages: write steps: - name: Checkout Code uses: actions/checkout@v4 @@ -22,15 +25,12 @@ jobs: - name: "Determine preview artifact version" run: | echo "artifact_full_version=${{ inputs.version }}-${{ github.sha }}" >> $GITHUB_ENV - - name: "Determine full container image uri (commit)" run: | echo "image_commit_uri=${{ inputs.image_name }}:$artifact_full_version" >> $GITHUB_ENV - - name: "Determine full container image uri (latest)" run: | echo "image_latest_uri=${{ inputs.image_name }}:${{ inputs.version }}" >> $GITHUB_ENV - - name: Determine container image metadata uses: docker/metadata-action@v5.5.1 with: @@ -40,7 +40,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: tomkerkhove + username: hkfgo password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -49,5 +49,5 @@ jobs: build-args: VERSION="${{ env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} - push: true + tags: ghcr.io/hkfgo/${{ env.image_commit_uri }},ghcr.io/hkfgo/${{ env.image_latest_uri }} + push: true \ No newline at end of file From fedf8144c1c4ef1c6ddb5823e16531c2b826ede5 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 1 Oct 2024 12:05:31 -0700 Subject: [PATCH 128/131] revert GitHub Action --- .github/workflows/templates-build-push-image.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index 23e9403d7..fc7a2afc6 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -1,6 +1,5 @@ -name: Build and Push (Linux) on: - workflow_dispatch: + workflow_call: inputs: image_name: required: true @@ -16,8 +15,6 @@ jobs: linux: name: Build & Push (Linux) runs-on: ubuntu-latest -
permissions: - packages: write steps: - name: Checkout Code uses: actions/checkout@v4 @@ -25,12 +22,15 @@ jobs: - name: "Determine preview artifact version" run: | echo "artifact_full_version=${{ inputs.version }}-${{ github.sha }}" >> $GITHUB_ENV + - name: "Determine full container image uri (commit)" run: | echo "image_commit_uri=${{ inputs.image_name }}:$artifact_full_version" >> $GITHUB_ENV + - name: "Determine full container image uri (latest)" run: | echo "image_latest_uri=${{ inputs.image_name }}:${{ inputs.version }}" >> $GITHUB_ENV + - name: Determine container image metadata uses: docker/metadata-action@v5.5.1 with: @@ -40,7 +40,7 @@ jobs: uses: docker/login-action@v3 with: registry: ghcr.io - username: hkfgo + username: tomkerkhove password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push preview image @@ -49,5 +49,5 @@ jobs: build-args: VERSION="${{ env.artifact_full_version }}" context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux - tags: ghcr.io/hkfgo/${{ env.image_commit_uri }},ghcr.io/hkfgo/${{ env.image_latest_uri }} + tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} push: true \ No newline at end of file From 988da9b9a0c4e47aa12212456e1da07b294b57ba Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 1 Oct 2024 12:05:56 -0700 Subject: [PATCH 129/131] remove more debug logging --- .../Extensions/AzureMonitorQueryTasks.cs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs index 26b89c7fa..0240335e3 100644 --- a/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs +++ b/src/Promitor.Integrations.AzureMonitor/Extensions/AzureMonitorQueryTasks.cs @@ -79,8 +79,6 @@ public static async Task> GetRelevantMetricForResources(this TimeRange= new QueryTimeRange(new DateTimeOffset(recordDateTime.AddHours(-historyStartingFromInHours)), new 
DateTimeOffset(recordDateTime)) }; } - logger.LogWarning("Batch query range: {Range}, size: {Size}, granularity: {Interval}, aggregation: {Aggregation}, filter: {Filter}", queryOptions.TimeRange, queryOptions.Size, queryOptions.Granularity, queryOptions.Aggregations, queryOptions.Filter); - logger.LogWarning("Resource IDs: {IDs}", resourceIds); var metricsBatchQueryResponse = await metricsClient.QueryResourcesAsync(resourceIdentifiers, [metricName], metricNamespace, queryOptions); var metricsQueryResults = metricsBatchQueryResponse.Value; From fd14f63d55c43f1e8bcce6e43c5dc0891ca8feb0 Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 1 Oct 2024 12:54:19 -0700 Subject: [PATCH 130/131] retry-ci --- .github/workflows/templates-build-push-image.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index fc7a2afc6..d43e66afa 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -50,4 +50,5 @@ jobs: context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} - push: true \ No newline at end of file + push: true + \ No newline at end of file From 1face3039433578133a791c388842f0be5ea609f Mon Sep 17 00:00:00 2001 From: xchen Date: Tue, 1 Oct 2024 17:46:00 -0700 Subject: [PATCH 131/131] retry-ci --- .github/workflows/templates-build-push-image.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/templates-build-push-image.yml b/.github/workflows/templates-build-push-image.yml index d43e66afa..fc7a2afc6 100644 --- a/.github/workflows/templates-build-push-image.yml +++ b/.github/workflows/templates-build-push-image.yml @@ -50,5 +50,4 @@ jobs: context: ./src/ file: ./src/${{ inputs.project_name }}/Dockerfile.linux tags: ${{ env.image_commit_uri }},${{ env.image_latest_uri }} - push: true - \ 
No newline at end of file + push: true \ No newline at end of file