From 17b9693e1ed265db3bfd5e799c8977d7710d4864 Mon Sep 17 00:00:00 2001 From: nr-opensource-bot Date: Thu, 17 Feb 2022 00:04:49 +0000 Subject: [PATCH] chore(related-content): updated related content data --- src/data/related-pages.json | 12000 +++++++++++++++++----------------- 1 file changed, 5972 insertions(+), 6028 deletions(-) diff --git a/src/data/related-pages.json b/src/data/related-pages.json index dc17e02f..ac00f70f 100644 --- a/src/data/related-pages.json +++ b/src/data/related-pages.json @@ -35,7 +35,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 160.21188, + "_score": 152.05011, "_version": null, "_explanation": null, "sort": null, @@ -78,7 +78,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.49971, + "_score": 108.24081, "_version": null, "_explanation": null, "sort": null, @@ -117,7 +117,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.82003, + "_score": 107.5746, "_version": null, "_explanation": null, "sort": null, @@ -156,7 +156,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.24931, + "_score": 94.640625, "_version": null, "_explanation": null, "sort": null, @@ -195,7 +195,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.17746, + "_score": 88.016014, "_version": null, "_explanation": null, "sort": null, @@ -236,7 +236,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -276,7 +276,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -316,7 +316,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -356,7 +356,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -396,7 +396,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -438,7 +438,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -478,7 +478,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -518,7 +518,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -558,7 +558,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -598,7 +598,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, 
"_version": null, "_explanation": null, "sort": null, @@ -640,7 +640,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -680,7 +680,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -720,7 +720,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -760,7 +760,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -800,7 +800,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -842,7 +842,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -882,7 +882,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -922,7 +922,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -962,7 +962,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -1002,7 +1002,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -1044,7 +1044,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1084,7 +1084,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1124,7 +1124,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1164,7 +1164,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -1204,7 +1204,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -1259,7 +1259,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.30194, + "_score": 86.50932, "_version": null, "_explanation": null, "sort": null, @@ -1294,7 +1294,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 91.16302, + "_score": 85.40664, "_version": null, 
"_explanation": null, "sort": null, @@ -1333,7 +1333,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.26323, + "_score": 76.73684, "_version": null, "_explanation": null, "sort": null, @@ -1345,6 +1345,46 @@ }, "id": "604418abe7b9d2d1015799cc" }, + { + "sections": [ + "Install Android apps with Gradle and Android Studio", + "Install your Android app", + "Configure with Gradle and Android Studio", + "Project level build.gradle file:", + "App level build.gradle file:", + "Important", + "Update your Android installation", + "Android 4.x: Multidex support", + "New Relic Gradle Plugin configuration" + ], + "title": "Install Android apps with Gradle and Android Studio", + "type": "docs", + "tags": [ + "Install configure", + "New Relic Mobile Android", + "Mobile monitoring" + ], + "external_id": "51fab3eba87ddee949cd4729de8b5f64534de9c7", + "image": "", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-android/install-configure/install-android-apps-gradle-android-studio/", + "published_at": "2022-02-14T11:59:24Z", + "updated_at": "2022-02-04T07:09:38Z", + "document_type": "page", + "popularity": 1, + "body": "If you use Gradle or Android Studio, follow these instructions to install New Relic's mobile monitoring for the first time. If you have previously installed the Android agent SDK for mobile monitoring, follow the steps before upgrading to the latest version with Gradle and Android Studio. Install your Android app As part of the installation process for mobile monitoring, New Relic automatically generates an application token. This is a 40-character hexadecimal string for authenticating each mobile app you monitor in New Relic. Go to one.newrelic.com > Mobile. If applicable: From the Mobile apps index, select Add a new app. From the Get started page, select Android as the platform for mobile monitoring. Type a meaningful name for your mobile app, and select Continue. Continue with the steps to configure mobile monitoring with Gradle and Android Studio. OR: To complete the configuration process for a new mobile app later: Go to one.newrelic.com > Mobile, then select See instructions next to your mobile app name. To upgrade an existing Android installation: Go to one.newrelic.com > Mobile > (select an app) > Settings > Installation. Configure with Gradle and Android Studio These procedures to configure your Android app with Gradle and Android Studio also appear on the Get started page in New Relic. Merge New Relic's mobile monitoring code in the Gradle & Android Studio tab to your build.gradle file. Project level build.gradle file: In this example, AGENT_VERSION represents your agent version number. See the agent release notes, and use the latest version. buildscript { repositories { mavenCentral() } dependencies { classpath \"com.newrelic.agent.android:agent-gradle-plugin:AGENT_VERSION\" } } Copy App level build.gradle file: In this example, AGENT_VERSION represents your agent version number. See the agent release notes, and use the latest version. repositories { mavenCentral() } apply plugin: 'android' apply plugin: 'newrelic' dependencies { implementation 'com.newrelic.agent.android:android-agent:AGENT_VERSION' } Copy ProGuard or DexGuard: In your project’s root directory (projectname/app), add a newrelic.properties file with the following line: com.newrelic.application_token=GENERATED_TOKEN Copy Follow the additional, required configuration steps for using ProGuard or DexGuard with New Relic. 
Set app permissions: Ensure that your Android app requests INTERNET and ACCESS_NETWORK_STATE permissions by adding these lines to your AndroidManifest.xml file: Copy To start New Relic's mobile monitoring agent: In your Default Activity (as defined in your Manifest), import the NewRelic class: import com.newrelic.agent.android.NewRelic; Copy Important We do not support starting the mobile monitoring agent in other classes, as that can cause unexpected or unstable behavior. To initialize mobile monitoring: In the onCreate() method, add this call: NewRelic.withApplicationToken(\"GENERATED_TOKEN\").start(this.getApplication()); Copy Optional: Change the logging level. To build and run your application: Clean your project, then run your app in an emulator or device to generate traffic. Wait a few minutes, then view data for your Android app from the Overview page: Go to one.newrelic.com > Mobile > (select an app). If you have problems with your Android installation, or if you do not see data in the mobile monitoring UI, follow the troubleshooting procedures. Update your Android installation To keep your Android agent up to date, follow the procedures to upgrade the Android agent SDK for New Relic. To install the latest Android version for your mobile app: Go to one.newrelic.com > Mobile > (select an app) > Settings > Installation. Android 4.x: Multidex support New Relic's mobile monitoring for Android versions prior to Android 5.0 (API level 21) use the Dalvik runtime to execute app code. By default, Dalvik limits apps to a single classes.dex bytecode file per APK. In order to get around this limitation, you must enable multidex support. Then you can use the multidex support library, which becomes part of the primary DEX file of your app and then manages access to the additional DEX files and the code they contain. When building each DEX file for a multidex app, the build tools perform complex decision making to determine which classes are needed in the primary DEX file so that your app can start successfully. If any class required during startup is not provided in the primary DEX file, then your app crashes with the error java.lang.NoClassDefFoundError. If you see the java.lang.NoClassDefFoundError error, then you must manually specify these additional classes as required in the primary DEX file: Create a proguard.multidex.config file within the /app folder of your project. Update mypackage to reflect your package name. #################### # keep class names # #################### #Keep New Relic in the main dex -keep class com.newrelic.** { *; } -keep class com.mypackage.activities.** { *; } Copy Merge the following code into the app-level build.gradle file: android { defaultConfig{ … multiDexKeepProguard file(\"proguard.multidex.config\") } } Copy For more information, see the Android Developers documentation on declaring classes required in the primary DEX file. New Relic Gradle Plugin configuration For details on how to configure the behavior of the mobile monitoring agent plugin during Gradle builds, see the New Relic Gradle plugin extension guide.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 62.383125, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "New Relic Mobile Android", + "body": "If you use Gradle or Android Studio, follow these instructions to install New Relic's mobile monitoring for the first time. 
If you have previously installed the Android agent SDK for mobile monitoring, follow the steps before upgrading to the latest version with Gradle and Android Studio. Install" + }, + "id": "603ea70128ccbc59c2eba74e" + }, { "image": "", "url": "https://docs.newrelic.com/attribute-dictionary/", @@ -1401,7 +1441,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.64697, + "_score": 62.33004, "_version": null, "_explanation": null, "sort": null, @@ -1410,50 +1450,6 @@ "body": ". Recommendation: Upgrade to the most recent mobile monitoring agent version to take full advantage of the new event types. Attribute name Definition Data types category The type of data, either session or interaction. MobileSession Mobile interactionDuration For interaction category events only" }, "id": "603f53b164441f41894e8875" - }, - { - "sections": [ - "Mobile monitoring alert information", - "Mobile alert conditions", - "Execution time", - "Errors", - "Crash reporting", - "Tip", - "Custom", - "View policies and conditions", - "View events and activities", - "View alert violations", - "Important" - ], - "title": "Mobile monitoring alert information", - "type": "docs", - "tags": [ - "Get started", - "New Relic Mobile", - "Mobile monitoring" - ], - "external_id": "93dd2fd0c629d0653bcb69533dd62162814a5ebd", - "image": "", - "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile/get-started/mobile-monitoring-alert-information/", - "published_at": "2022-02-15T12:26:47Z", - "updated_at": "2022-02-15T12:26:47Z", - "document_type": "page", - "popularity": 1, - "body": "Well-defined alerts help notify individuals or teams about changes in their systems. You can use any of our alerts and applied intelligence capabilities across all the entities you monitor with New Relic. For example, you can use alerts to manage alert policies and conditions that focus on the metrics for mobile apps you monitor with New Relic. After you define alert conditions for your mobile apps, you can view that information in the alerts UI or in other UI experiences, like the mobile monitoring UI. Mobile alert conditions Use any of the following conditions and thresholds when you set up alerts for your mobile apps: Execution time Execution time metrics include Database, Images, JSON, Network, and View loading. Define the thresholds for these metrics by selecting a time and duration. Errors Error metrics include Network error percentage and Status error percentage. Define the thresholds for these metrics by selecting a error percentage and duration. Crash reporting You may want to be notified when your app's crash rate goes above a certain threshold. Define the thresholds for these metrics by selecting an error percentage and duration. Tip Get notified about your exceeding app crash rate as soon as it happens by setting up notification channels, including PagerDuty and Slack. Custom Create your own custom metric to fit specific alert needs. Add a name for the customized metric, and define your own thresholds. View policies and conditions To view alert policy and condition information for a specific mobile app: Go to one.newrelic.com, click Mobile, and click Alert conditions. From the Alert conditions page, use the available tools to search, sort, view, or update the alert conditions and their associated policies. View events and activities To view summary information about events and other activity directly from the mobile monitoring UI: Go to one.newrelic.com and click Mobile. 
From the index, mouse over the entity's color-coded health status, select a link from the Mobile activity list, or select a mobile app to view additional details. For example, if a Critical alert violation occurs: The health status indicator turns red on the mobile index and on the selected app. The background color for various charts changes to pink. On your list of monitored mobile apps, the Application activity section shows Warning (yellow) and Critical (red) violations as applicable. To learn more about an alert violation, mouse over or select any pink area on a chart. View alert violations Important In July 2020, we transitioned alert violations for browser apps, mobile apps, and synthetic monitors to a new format in one.newrelic.com. For more information, see the Applied Intelligence and alerting docs and this Explorers Hub post. If an alert condition has thresholds set up for Warning (yellow) or Critical (red) violations, the color-coded health status for a product entity will change to indicate a violation. You can view the violations directly from the mobile app's page in New Relic: Go to one.newrelic.com and click Mobile. Select a mobile app, and then review its Open violations.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 64.1752, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Mobile monitoring alert information", - "sections": "Mobile monitoring alert information", - "tags": "New Relic Mobile", - "body": " on the metrics for mobile apps you monitor with New Relic. After you define alert conditions for your mobile apps, you can view that information in the alerts UI or in other UI experiences, like the mobile monitoring UI. Mobile alert conditions Use any of the following conditions and thresholds when you set up" - }, - "id": "6044144228ccbcd7422c608a" } ], "/aws-health/b9835593-aa0a-4d02-845b-33d5cec4da42": [ @@ -1484,7 +1480,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1524,7 +1520,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1564,7 +1560,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1604,7 +1600,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -1644,7 +1640,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -1659,91 +1655,6 @@ } ], "/aws-efs/29b8f0ce-b573-48e3-98ab-52e4231d2225": [ - { - "sections": [ - "Amazon DocumentDB monitoring integration", - "Important", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "DocumentDB Cluster data", - "DocumentDB ClusterByRole data", - "DocumentDB Instance data" - ], - "title": "Amazon DocumentDB monitoring integration", - "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": 
"2254fd18215db2b24649f91f164707a6b0a253fb", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-documentdb-monitoring-integration/", - "published_at": "2022-02-14T10:53:00Z", - "updated_at": "2022-02-14T10:52:59Z", - "document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic offers an integration for reporting your Amazon DocumentDB data. This document explains how to activate this integration and describes the data that can be reported. Activate integration To enable this integration follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon DocumentDB integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select an integration. You can query and explore your data using the following event type: Entity Event Type Provider Cluster AwsDocDbClusterSample AwsDocDbCluster ClusterByRole AwsDocDbClusterByRoleSample AwsDocDbClusterByRole Instance AwsDocDbInstanceSample AwsDocDbInstance For more on how to use your data, see Understand and use integration data. Metric data This integration collects Amazon DocumentDB data for Cluster, ClusterByRole, and Instance. DocumentDB Cluster data Metric Unit Description CPUUtilization Percent The percentage of CPU used by an instance. DatabaseConnections Count The number of connections to an instance. FreeLocalStorage Bytes This metric reports the amount of storage available to each instance for temporary tables and logs. This value depends on the instance class. You can increase the amount of free storage space for an instance by choosing a larger instance class for your instance. FreeableMemory Bytes The amount of available random access memory, in bytes. WriteIOPS CountPerSecond The average number of disk write I/O operations per second. Read and write IOPS are reported separately, on 1-minute intervals. ReadIOPS CountPerSecond The average number of disk read I/O operations per second. Amazon DocumentDB reports read and write IOPS separately, and on one-minute intervals. WriteLatency Milliseconds The average amount of time, in milliseconds, taken per disk I/O operation. ReadLatency Milliseconds The average amount of time taken per disk I/O operation, in milliseconds. WriteThroughput Bytes The average number of bytes written to disk per second. ReadThroughput Bytes The average number of bytes read from disk per second. NetworkThroughput Bytes The amount of network throughput, in bytes per second, both received from and transmitted to clients by each instance in the Amazon DocumentDB cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkTransmitThroughput Bytes The amount of network throughput, in bytes per second, sent to clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkReceiveThroughput Bytes The amount of network throughput, in bytes per second, received from clients by each instance in the cluster. 
This throughput doesn't include network traffic between instances in the cluster and the cluster volume. EngineUptime Seconds The amount of time, in seconds, that the instance has been running. SwapUsage Bytes The amount of swap space used on the instance. DiskQueueDepth Count The number of outstanding read/write requests waiting to access the disk. BufferCacheHitRatio Percent The percentage of requests that are served by the buffer cache. BackupRetentionPeriodStorageUsed Bytes The total amount of backup storage in GiB used to support the point-in-time restore feature within the Amazon DocumentDB's retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. SnapshotStorageUsed Bytes The total amount of backup storage in GiB consumed by all snapshots for a given Amazon DocumentDB cluster outside its backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. TotalBackupStorageBilled Bytes The total amount of backup storage in GiB for which you are billed for a given Amazon DocumentDB cluster. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. Computed separately for each Amazon DocumentDB cluster. DBInstanceReplicaLag Milliseconds The amount of lag, in milliseconds, when replicating updates from the primary instance to a replica instance. DBClusterReplicaLagMaximum Milliseconds The maximum amount of lag, in milliseconds, between the primary instance and each Amazon DocumentDB instance in the cluster. DBClusterReplicaLagMinimum Milliseconds The minimum amount of lag, in milliseconds, between the primary instance and each replica instance in the cluster. VolumeBytesUsed Bytes The amount of storage, in bytes, used by your cluster, in bytes. This value affects the cost of the cluster (for pricing information, see the Amazon DocumentDB product page). VolumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the read operations metric over a 5-minute period. You can determine the amount of billed read operations per second by taking the value of the billed read operations metric and dividing by 300 seconds. For example, if the billed read operations returns 13,686, then the billed read operations per second is 45 (13,686 / 300 = 45.62). You accrue billed read operations for queries that request database pages that are not present in the buffer cache and therefore must be loaded from storage. You might see spikes in billed read operations as query results are read from storage and then loaded into the buffer cache. VolumeWriteIOPs Count The average number of billed write I/O operations from a cluster volume, reported at 5-minute intervals. Billed write operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the write operations metric over a 5-minute period. You can determine the amount of billed write operations per second by taking the value of the billed write operations metric and dividing by 300 seconds. 
For example, if the billed write operations returns 13,686, then the billed write operations per second is 45 (13,686 / 300 = 45.62). DocumentDB ClusterByRole data Metric Unit Description CPUUtilization Percent The percentage of CPU used by an instance. DatabaseConnections Count The number of connections to an instance. FreeLocalStorage Bytes This metric reports the amount of storage available to each instance for temporary tables and logs. This value depends on the instance class. You can increase the amount of free storage space for an instance by choosing a larger instance class for your instance. FreeableMemory Bytes The amount of available random access memory, in bytes. WriteIOPS CountPerSecond The average number of disk write I/O operations per second. Read and write IOPS are reported separately, on 1-minute intervals. ReadIOPS CountPerSecond The average number of disk read I/O operations per second. Amazon DocumentDB reports read and write IOPS separately, and on one-minute intervals. WriteLatency Milliseconds The average amount of time, in milliseconds, taken per disk I/O operation. ReadLatency Milliseconds The average amount of time taken per disk I/O operation, in milliseconds. WriteThroughput Bytes The average number of bytes written to disk per second. ReadThroughput Bytes The average number of bytes read from disk per second. NetworkThroughput Bytes The amount of network throughput, in bytes per second, both received from and transmitted to clients by each instance in the Amazon DocumentDB cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkTransmitThroughput Bytes The amount of network throughput, in bytes per second, sent to clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkReceiveThroughput Bytes The amount of network throughput, in bytes per second, received from clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. EngineUptime Seconds The amount of time, in seconds, that the instance has been running. SwapUsage Bytes The amount of swap space used on the instance. DiskQueueDepth Count The number of outstanding read/write requests waiting to access the disk. BufferCacheHitRatio Percent The percentage of requests that are served by the buffer cache. BackupRetentionPeriodStorageUsed Bytes The total amount of backup storage in GiB used to support the point-in-time restore feature within the Amazon DocumentDB's retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. SnapshotStorageUsed Bytes The total amount of backup storage in GiB consumed by all snapshots for a given Amazon DocumentDB cluster outside its backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. TotalBackupStorageBilled Bytes The total amount of backup storage in GiB for which you are billed for a given Amazon DocumentDB cluster. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. Computed separately for each Amazon DocumentDB cluster. DBInstanceReplicaLag Milliseconds The amount of lag, in milliseconds, when replicating updates from the primary instance to a replica instance. 
DBClusterReplicaLagMaximum Milliseconds The maximum amount of lag, in milliseconds, between the primary instance and each Amazon DocumentDB instance in the cluster. DBClusterReplicaLagMinimum Milliseconds The minimum amount of lag, in milliseconds, between the primary instance and each replica instance in the cluster. VolumeBytesUsed Bytes The amount of storage, in bytes, used by your cluster, in bytes. This value affects the cost of the cluster (for pricing information, see the Amazon DocumentDB product page). VolumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the read operations metric over a 5-minute period. You can determine the amount of billed read operations per second by taking the value of the billed read operations metric and dividing by 300 seconds. For example, if the billed read operations returns 13,686, then the billed read operations per second is 45 (13,686 / 300 = 45.62). You accrue billed read operations for queries that request database pages that are not present in the buffer cache and therefore must be loaded from storage. You might see spikes in billed read operations as query results are read from storage and then loaded into the buffer cache. VolumeWriteIOPs Count The average number of billed write I/O operations from a cluster volume, reported at 5-minute intervals. Billed write operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the write operations metric over a 5-minute period. You can determine the amount of billed write operations per second by taking the value of the billed write operations metric and dividing by 300 seconds. For example, if the billed write operations returns 13,686, then the billed write operations per second is 45 (13,686 / 300 = 45.62). DocumentDB Instance data Metric Unit Description CPUUtilization Percent The percentage of CPU used by an instance. DatabaseConnections Count The number of connections to an instance. FreeLocalStorage Bytes This metric reports the amount of storage available to each instance for temporary tables and logs. This value depends on the instance class. You can increase the amount of free storage space for an instance by choosing a larger instance class for your instance. FreeableMemory Bytes The amount of available random access memory, in bytes. WriteIOPS CountPerSecond The average number of disk write I/O operations per second. Read and write IOPS are reported separately, on 1-minute intervals. ReadIOPS CountPerSecond The average number of disk read I/O operations per second. Amazon DocumentDB reports read and write IOPS separately, and on one-minute intervals. WriteLatency Milliseconds The average amount of time, in milliseconds, taken per disk I/O operation. ReadLatency Milliseconds The average amount of time taken per disk I/O operation, in milliseconds. WriteThroughput Bytes The average number of bytes written to disk per second. ReadThroughput Bytes The average number of bytes read from disk per second. NetworkThroughput Bytes The amount of network throughput, in bytes per second, both received from and transmitted to clients by each instance in the Amazon DocumentDB cluster. 
This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkTransmitThroughput Bytes The amount of network throughput, in bytes per second, sent to clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkReceiveThroughput Bytes The amount of network throughput, in bytes per second, received from clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. EngineUptime Seconds The amount of time, in seconds, that the instance has been running. SwapUsage Bytes The amount of swap space used on the instance. DiskQueueDepth Count The number of outstanding read/write requests waiting to access the disk. BufferCacheHitRatio Percent The percentage of requests that are served by the buffer cache. BackupRetentionPeriodStorageUsed Bytes The total amount of backup storage in GiB used to support the point-in-time restore feature within the Amazon DocumentDB's retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. SnapshotStorageUsed Bytes The total amount of backup storage in GiB consumed by all snapshots for a given Amazon DocumentDB cluster outside its backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. TotalBackupStorageBilled Bytes The total amount of backup storage in GiB for which you are billed for a given Amazon DocumentDB cluster. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. Computed separately for each Amazon DocumentDB cluster. DBInstanceReplicaLag Milliseconds The amount of lag, in milliseconds, when replicating updates from the primary instance to a replica instance. DBClusterReplicaLagMaximum Milliseconds The maximum amount of lag, in milliseconds, between the primary instance and each Amazon DocumentDB instance in the cluster. DBClusterReplicaLagMinimum Milliseconds The minimum amount of lag, in milliseconds, between the primary instance and each replica instance in the cluster. VolumeBytesUsed Bytes The amount of storage, in bytes, used by your cluster, in bytes. This value affects the cost of the cluster (for pricing information, see the Amazon DocumentDB product page). VolumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the read operations metric over a 5-minute period. You can determine the amount of billed read operations per second by taking the value of the billed read operations metric and dividing by 300 seconds. For example, if the billed read operations returns 13,686, then the billed read operations per second is 45 (13,686 / 300 = 45.62). You accrue billed read operations for queries that request database pages that are not present in the buffer cache and therefore must be loaded from storage. You might see spikes in billed read operations as query results are read from storage and then loaded into the buffer cache. 
VolumeWriteIOPs Count The average number of billed write I/O operations from a cluster volume, reported at 5-minute intervals. Billed write operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the write operations metric over a 5-minute period. You can determine the amount of billed write operations per second by taking the value of the billed write operations metric and dividing by 300 seconds. For example, if the billed write operations returns 13,686, then the billed write operations per second is 45 (13,686 / 300 = 45.62).", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 142.893, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Amazon DocumentDB monitoring integration", - "sections": "Amazon DocumentDB monitoring integration", - "tags": "AWS integrations list", - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic offers an integration for reporting your Amazon DocumentDB data" - }, - "id": "617da7ad64441fbd20fbc7da" - }, - { - "sections": [ - "Amazon Neptune monitoring integration", - "Important", - "Activate integration", - "Configuration and polling", - "View and use data", - "Metric data", - "Neptune Instance data", - "Neptune Cluster data", - "Neptune ClusterByRole data", - "Neptune DatabaseClass data" - ], - "title": "Amazon Neptune monitoring integration", - "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": "b9c9de74ee9724c8e21e901c0bfca7da703ab850", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-neptune-monitoring-integration/", - "published_at": "2022-02-14T09:29:00Z", - "updated_at": "2022-02-14T09:29:00Z", - "document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your Amazon Neptune data to New Relic. This document explains how to activate this integration and describes the data that can be reported. Activate integration To enable this integration, follow standard procedures to connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon Neptune integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute View and use data To view and use your integration data, go to one.newrelic.com > Infrastructure > AWS and select an integration. You can query and explore your data using the following event type: Entity Event type Provider Instance AwsNeptuneInstanceSample AwsNeptuneInstance Cluster AwsNeptuneClusterSample AwsNeptuneCluster ClusterByRole AwsNeptuneClusterByRoleSample AwsNeptuneClusterByRole DatabaseClass AwsNeptuneDatabaseClassSample AwsNeptuneDatabaseClass Metric data This integration collects Amazon Neptune data for Instance, Cluster, ClusterByRole and DatabaseClass. 
Neptune Instance data Metric Unit Description backupRetentionPeriodStorageUsed Bytes The total amount of backup storage, in bytes, used to support from the Neptune DB cluster's backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. clusterReplicaLag Milliseconds For a read replica, the amount of lag when replicating updates from the primary instance, in milliseconds. clusterReplicaLagMaximum Milliseconds The maximum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. clusterReplicaLagMinimum Milliseconds The minimum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. cpuUtilization Percent The percentage of CPU utilization. engineUptime Seconds The amount of time that the instance has been running, in seconds. freeableMemory Bytes The amount of available random access memory, in bytes. gremlinRequestsPerSec Count Number of requests per second to the Gremlin engine. gremlinWebSocketOpenConnections Count The number of open WebSocket connections to Neptune. loaderRequestsPerSec Count Number of loader requests per second. mainRequestQueuePendingRequests Count The number of requests waiting in the input queue pending execution. Neptune starts throttling requests when they exceed the maximum queue capacity. networkReceiveThroughput BytesPerSecond The incoming (Receive) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. networkThroughput BytesPerSecond The amount of network throughput both received from and transmitted to clients by each instance in the Neptune DB cluster, in bytes per second. This throughput doesn't include network traffic between instances in the DB cluster and the cluster volume. networkTransmitThroughput BytesPerSecond The outgoing (Transmit) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. numTxCommitted Count The number of transactions successfully committed per second. numTxOpened Count The number of transactions opened on the server per second. numTxRolledBack Count The number of transactions per second rolled back on the server because of errors. snapshotStorageUsed Bytes The total amount of backup storage consumed by all snapshots for a Neptune DB cluster outside its backup retention window, in bytes. Included in the total reported by the TotalBackupStorageBilled metric. sparqlRequestsPerSec Count The number of requests per second to the SPARQL engine. totalBackupStorageBilled Bytes The total amount of backup storage for which you are billed for a given Neptune DB cluster, in bytes. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. totalRequestsPerSec Count The total number of requests per second to the server from all sources. totalClientErrorsPerSec Count The total number per second of requests that errored out because of client-side issues. totalServerErrorsPerSec Count The total number per second of requests that errored out on the server because of internal failures. volumeBytesUsed Bytes The amount of storage used by your Neptune DB instance, in bytes. This value affects the cost of the Neptune DB cluster. volumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. 
Billed read operations are calculated at the cluster volume level, aggregated from all instances in the Neptune DB cluster, and then reported at 5-minute intervals. volumeWriteIOPs Count The average number of write disk I/O operations to the cluster volume, reported at 5-minute intervals. Neptune Cluster data Metric Unit Description backupRetentionPeriodStorageUsed Bytes The total amount of backup storage, in bytes, used to support from the Neptune DB cluster's backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. clusterReplicaLag Milliseconds For a read replica, the amount of lag when replicating updates from the primary instance, in milliseconds. clusterReplicaLagMaximum Milliseconds The maximum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. clusterReplicaLagMinimum Milliseconds The minimum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. cpuUtilization Percent The percentage of CPU utilization. engineUptime Seconds The amount of time that the instance has been running, in seconds. freeableMemory Bytes The amount of available random access memory, in bytes. gremlinRequestsPerSec Count Number of requests per second to the Gremlin engine. gremlinWebSocketOpenConnections Count The number of open WebSocket connections to Neptune. loaderRequestsPerSec Count Number of loader requests per second. mainRequestQueuePendingRequests Count The number of requests waiting in the input queue pending execution. Neptune starts throttling requests when they exceed the maximum queue capacity. networkReceiveThroughput BytesPerSecond The incoming (Receive) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. networkThroughput BytesPerSecond The amount of network throughput both received from and transmitted to clients by each instance in the Neptune DB cluster, in bytes per second. This throughput doesn't include network traffic between instances in the DB cluster and the cluster volume. networkTransmitThroughput BytesPerSecond The outgoing (Transmit) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. numTxCommitted Count The number of transactions successfully committed per second. numTxOpened Count The number of transactions opened on the server per second. numTxRolledBack Count The number of transactions per second rolled back on the server because of errors. snapshotStorageUsed Bytes The total amount of backup storage consumed by all snapshots for a Neptune DB cluster outside its backup retention window, in bytes. Included in the total reported by the TotalBackupStorageBilled metric. sparqlRequestsPerSec Count The number of requests per second to the SPARQL engine. totalBackupStorageBilled Bytes The total amount of backup storage for which you are billed for a given Neptune DB cluster, in bytes. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. totalRequestsPerSec Count The total number of requests per second to the server from all sources. totalClientErrorsPerSec Count The total number per second of requests that errored out because of client-side issues. totalServerErrorsPerSec Count The total number per second of requests that errored out on the server because of internal failures. 
volumeBytesUsed Bytes The amount of storage used by your Neptune DB instance, in bytes. This value affects the cost of the Neptune DB cluster. volumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the Neptune DB cluster, and then reported at 5-minute intervals. volumeWriteIOPs Count The average number of write disk I/O operations to the cluster volume, reported at 5-minute intervals. Neptune ClusterByRole data Metric Unit Description backupRetentionPeriodStorageUsed Bytes The total amount of backup storage, in bytes, used to support from the Neptune DB cluster's backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. clusterReplicaLag Milliseconds For a read replica, the amount of lag when replicating updates from the primary instance, in milliseconds. clusterReplicaLagMaximum Milliseconds The maximum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. clusterReplicaLagMinimum Milliseconds The minimum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. cpuUtilization Percent The percentage of CPU utilization. engineUptime Seconds The amount of time that the instance has been running, in seconds. freeableMemory Bytes The amount of available random access memory, in bytes. gremlinRequestsPerSec Count Number of requests per second to the Gremlin engine. gremlinWebSocketOpenConnections Count The number of open WebSocket connections to Neptune. loaderRequestsPerSec Count Number of loader requests per second. mainRequestQueuePendingRequests Count The number of requests waiting in the input queue pending execution. Neptune starts throttling requests when they exceed the maximum queue capacity. networkReceiveThroughput BytesPerSecond The incoming (Receive) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. networkThroughput BytesPerSecond The amount of network throughput both received from and transmitted to clients by each instance in the Neptune DB cluster, in bytes per second. This throughput doesn't include network traffic between instances in the DB cluster and the cluster volume. networkTransmitThroughput BytesPerSecond The outgoing (Transmit) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. numTxCommitted Count The number of transactions successfully committed per second. numTxOpened Count The number of transactions opened on the server per second. numTxRolledBack Count The number of transactions per second rolled back on the server because of errors. snapshotStorageUsed Bytes The total amount of backup storage consumed by all snapshots for a Neptune DB cluster outside its backup retention window, in bytes. Included in the total reported by the TotalBackupStorageBilled metric. sparqlRequestsPerSec Count The number of requests per second to the SPARQL engine. totalBackupStorageBilled Bytes The total amount of backup storage for which you are billed for a given Neptune DB cluster, in bytes. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. 
totalRequestsPerSec Count The total number of requests per second to the server from all sources. totalClientErrorsPerSec Count The total number per second of requests that errored out because of client-side issues. totalServerErrorsPerSec Count The total number per second of requests that errored out on the server because of internal failures. volumeBytesUsed Bytes The amount of storage used by your Neptune DB instance, in bytes. This value affects the cost of the Neptune DB cluster. volumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the Neptune DB cluster, and then reported at 5-minute intervals. volumeWriteIOPs Count The average number of write disk I/O operations to the cluster volume, reported at 5-minute intervals. Neptune DatabaseClass data Metric Unit Description backupRetentionPeriodStorageUsed Bytes The total amount of backup storage, in bytes, used to support from the Neptune DB cluster's backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. clusterReplicaLag Milliseconds For a read replica, the amount of lag when replicating updates from the primary instance, in milliseconds. clusterReplicaLagMaximum Milliseconds The maximum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. clusterReplicaLagMinimum Milliseconds The minimum amount of lag between the primary instance and each Neptune DB instance in the DB cluster, in milliseconds. cpuUtilization Percent The percentage of CPU utilization. engineUptime Seconds The amount of time that the instance has been running, in seconds. freeableMemory Bytes The amount of available random access memory, in bytes. gremlinRequestsPerSec Count Number of requests per second to the Gremlin engine. gremlinWebSocketOpenConnections Count The number of open WebSocket connections to Neptune. loaderRequestsPerSec Count Number of loader requests per second. mainRequestQueuePendingRequests Count The number of requests waiting in the input queue pending execution. Neptune starts throttling requests when they exceed the maximum queue capacity. networkReceiveThroughput BytesPerSecond The incoming (Receive) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. networkThroughput BytesPerSecond The amount of network throughput both received from and transmitted to clients by each instance in the Neptune DB cluster, in bytes per second. This throughput doesn't include network traffic between instances in the DB cluster and the cluster volume. networkTransmitThroughput BytesPerSecond The outgoing (Transmit) network traffic on the DB instance, including both customer database traffic and Neptune traffic used for monitoring and replication, in bytes/second. numTxCommitted Count The number of transactions successfully committed per second. numTxOpened Count The number of transactions opened on the server per second. numTxRolledBack Count The number of transactions per second rolled back on the server because of errors. snapshotStorageUsed Bytes The total amount of backup storage consumed by all snapshots for a Neptune DB cluster outside its backup retention window, in bytes. Included in the total reported by the TotalBackupStorageBilled metric. 
sparqlRequestsPerSec Count The number of requests per second to the SPARQL engine. totalBackupStorageBilled Bytes The total amount of backup storage for which you are billed for a given Neptune DB cluster, in bytes. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. totalRequestsPerSec Count The total number of requests per second to the server from all sources. totalClientErrorsPerSec Count The total number per second of requests that errored out because of client-side issues. totalServerErrorsPerSec Count The total number per second of requests that errored out on the server because of internal failures. volumeBytesUsed Bytes The amount of storage used by your Neptune DB instance, in bytes. This value affects the cost of the Neptune DB cluster. volumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the Neptune DB cluster, and then reported at 5-minute intervals. volumeWriteIOPs Count The average number of write disk I/O operations to the cluster volume, reported at 5-minute intervals.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.87129, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Amazon Neptune monitoring integration", - "sections": "Amazon Neptune monitoring integration", - "tags": "AWS integrations list", - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your Amazon" - }, - "id": "617d6d5d64441ffa79fbca19" - }, { "sections": [ "AWS Kinesis Data Streams", @@ -1771,7 +1682,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -1811,7 +1722,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -1851,7 +1762,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -1863,6 +1774,86 @@ "body": "What's included AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Doc What is AWS EMR? Process and manage big data inputs from popular frameworks. Get started! Start monitoring AWS EMR by connecting Amazon Web Services (AWS) to New Relic! 
Check out our AWS EMR documentation" }, "id": "61566b7b64441f603909962d" + }, + { + "sections": [ + "AWS Billing", + "What's included", + "AWS Billing installation docs", + "What is AWS Billing?", + "Get started!", + "More info" + ], + "title": "AWS Billing", + "type": "quickstarts", + "tags": [ + "aws", + "amazon web services" + ], + "quick_start_name": "AWS Billing", + "external_id": "748c52cca409971c66b5151e152f477fd124606a", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/aws-billing/c7f15a54-e243-4e23-95dd-ff08937041ed/", + "published_at": "2022-02-04T02:03:04Z", + "updated_at": "2021-10-06T13:59:37Z", + "document_type": "page", + "popularity": 1, + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Billing documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Billing. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Billing observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 141.2099, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "AWS Billing", + "sections": "AWS Billing", + "tags": "amazon web services", + "quick_start_name": "AWS Billing", + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web" + }, + "id": "61566b7be7b9d2c9178de374" + }, + { + "sections": [ + "AWS Elastic Beanstalk", + "What's included", + "AWS Elastic Beanstalk installation docs", + "What is AWS Elastic Beanstalk?", + "Get started!", + "More info" + ], + "title": "AWS Elastic Beanstalk", + "type": "quickstarts", + "tags": [ + "aws", + "amazon web services" + ], + "quick_start_name": "AWS Elastic Beanstalk", + "external_id": "5f1d325b5bea77e6f550976224e344225a073f7b", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/aws-elastic-beanstalk/b232f332-e27a-4d34-a966-e7f678d35145/", + "published_at": "2022-02-07T01:45:28Z", + "updated_at": "2021-10-06T13:59:37Z", + "document_type": "page", + "popularity": 1, + "body": "What's included AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Doc What is AWS Elastic Beanstalk? Dynamic service that allows easy deployment and scalability for your applications on AWS. Get started! 
Start monitoring AWS Elastic Beanstalk by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Elastic Beanstalk. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Elastic Beanstalk observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 141.2099, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "AWS Elastic Beanstalk", + "sections": "AWS Elastic Beanstalk", + "tags": "amazon web services", + "quick_start_name": "AWS Elastic Beanstalk", + "body": " by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation" + }, + "id": "61566912196a678b22b70db3" } ], "/aws-elemental-mediapackage-vod/5e7868a3-df26-4c03-bad1-39b99ca89841": [ @@ -1893,7 +1884,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1933,7 +1924,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -1973,7 +1964,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -2013,7 +2004,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -2053,7 +2044,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -2095,7 +2086,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -2135,7 +2126,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -2175,7 +2166,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -2190,83 +2181,83 @@ }, { "sections": [ - "AWS Billing", + "AWS Auto Scaling", "What's 
included", - "AWS Billing installation docs", - "What is AWS Billing?", + "AWS Auto Scaling installation docs", + "What is AWS Auto Scaling?", "Get started!", "More info" ], - "title": "AWS Billing", + "title": "AWS Auto Scaling", "type": "quickstarts", "tags": [ "aws", "amazon web services" ], - "quick_start_name": "AWS Billing", - "external_id": "748c52cca409971c66b5151e152f477fd124606a", + "quick_start_name": "AWS Auto Scaling", + "external_id": "80047bd0bf951243f585ba8b0982f0aa92c39810", "image": "", - "url": "https://developer.newrelic.com/instant-observability/aws-billing/c7f15a54-e243-4e23-95dd-ff08937041ed/", - "published_at": "2022-02-04T02:03:04Z", - "updated_at": "2021-10-06T13:59:37Z", + "url": "https://developer.newrelic.com/instant-observability/aws-auto-scaling/e6a1654d-2e00-4956-a2da-39f1f5f5f5e9/", + "published_at": "2022-02-10T01:40:38Z", + "updated_at": "2021-10-06T13:59:42Z", "document_type": "page", "popularity": 1, - "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Billing documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Billing. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Billing observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Docs", + "body": "What's included AWS Auto Scaling installation docs Monitor AWS Auto Scaling by connecting AWS to New Relic. Doc What is AWS Auto Scaling? Launch or terminate EC2 instances automatically, adapting capacity based on user-defined policies, schedules, and health checks. Get started! Start monitoring AWS Auto Scaling by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Auto Scaling documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Auto Scaling. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Auto Scaling observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Auto Scaling installation docs Monitor AWS Auto Scaling by connecting AWS to New Relic. 
Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "AWS Billing", - "sections": "AWS Billing", + "title": "AWS Auto Scaling", + "sections": "AWS Auto Scaling", "tags": "amazon web services", - "quick_start_name": "AWS Billing", - "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web" + "quick_start_name": "AWS Auto Scaling", + "body": " AWS Auto Scaling by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Auto Scaling documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out" }, - "id": "61566b7be7b9d2c9178de374" + "id": "61566b7b28ccbcfa59f2145d" }, { "sections": [ - "AWS Elastic Beanstalk", + "AWS Billing", "What's included", - "AWS Elastic Beanstalk installation docs", - "What is AWS Elastic Beanstalk?", + "AWS Billing installation docs", + "What is AWS Billing?", "Get started!", "More info" ], - "title": "AWS Elastic Beanstalk", + "title": "AWS Billing", "type": "quickstarts", "tags": [ "aws", "amazon web services" ], - "quick_start_name": "AWS Elastic Beanstalk", - "external_id": "5f1d325b5bea77e6f550976224e344225a073f7b", + "quick_start_name": "AWS Billing", + "external_id": "748c52cca409971c66b5151e152f477fd124606a", "image": "", - "url": "https://developer.newrelic.com/instant-observability/aws-elastic-beanstalk/b232f332-e27a-4d34-a966-e7f678d35145/", - "published_at": "2022-02-07T01:45:28Z", + "url": "https://developer.newrelic.com/instant-observability/aws-billing/c7f15a54-e243-4e23-95dd-ff08937041ed/", + "published_at": "2022-02-04T02:03:04Z", "updated_at": "2021-10-06T13:59:37Z", "document_type": "page", "popularity": 1, - "body": "What's included AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Doc What is AWS Elastic Beanstalk? Dynamic service that allows easy deployment and scalability for your applications on AWS. Get started! Start monitoring AWS Elastic Beanstalk by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Elastic Beanstalk. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Elastic Beanstalk observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Docs", + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? 
Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Billing documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Billing. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Billing observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20692, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "AWS Elastic Beanstalk", - "sections": "AWS Elastic Beanstalk", + "title": "AWS Billing", + "sections": "AWS Billing", "tags": "amazon web services", - "quick_start_name": "AWS Elastic Beanstalk", - "body": " by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation" + "quick_start_name": "AWS Billing", + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! 
Start monitoring AWS Billing by connecting Amazon Web" }, - "id": "61566912196a678b22b70db3" + "id": "61566b7be7b9d2c9178de374" } ], "/aws-elastic-beanstalk/b232f332-e27a-4d34-a966-e7f678d35145": [ @@ -2297,7 +2288,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -2337,7 +2328,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -2377,7 +2368,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -2417,7 +2408,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -2457,7 +2448,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -2498,7 +2489,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.38779, "_version": null, "_explanation": null, "sort": null, @@ -2563,7 +2554,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 173.65556, + "_score": 163.53116, "_version": null, "_explanation": null, "sort": null, @@ -2601,7 +2592,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 154.93489, + "_score": 154.82718, "_version": null, "_explanation": null, "sort": null, @@ -2646,7 +2637,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 154.86427, + "_score": 144.02089, "_version": null, "_explanation": null, "sort": null, @@ -2685,7 +2676,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -2726,7 +2717,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 190.39679, + "_score": 176.81328, "_version": null, "_explanation": null, "sort": null, @@ -2769,7 +2760,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.7772, + "_score": 167.9541, "_version": null, "_explanation": null, "sort": null, @@ -2814,7 +2805,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 172.26666, + "_score": 160.51024, "_version": null, "_explanation": null, "sort": null, @@ -2852,7 +2843,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 171.41086, + "_score": 159.069, "_version": null, "_explanation": null, "sort": null, @@ -2866,45 +2857,43 @@ }, { "sections": [ - "AWS Connect monitoring integration", + "Amazon Transit Gateway monitoring integration", "Important", "Activate integration", "Configuration and polling", "Find and use data", "Metric data", - "Connect ContactFlow data", - "Connect Instance data", - "Connect Queue data" + "Transit Gateway TransitGateway data" ], - "title": "AWS Connect monitoring integration", + "title": "Amazon Transit 
Gateway monitoring integration", "type": "docs", "tags": [ "AWS integrations list", "Amazon integrations", "Integrations" ], - "external_id": "b2059ff3f22c28a6697290f776d1792350e1f385", + "external_id": "3a87c9231443d9fc38bfae0f40c8320bb28009d3", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-connect-monitoring-integration/", - "published_at": "2022-02-15T19:47:17Z", - "updated_at": "2022-02-15T19:47:17Z", + "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/amazon-transit-gateway-monitoring-integration/", + "published_at": "2022-02-15T09:45:53Z", + "updated_at": "2022-02-15T09:45:53Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your AWS Connect data to New Relic. Here we explain how to activate the integration and what data it collects. Activate integration To enable this integration follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the AWS Connect integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select an integration. Data is attached to the following event type: Entity Event Type Provider ContactFlow AwsConnectContactFlowSample AwsConnectContactFlow Instance AwsConnectInstanceSample AwsConnectInstance Queue AwsConnectQueueSample AwsConnectQueue For more on how to use your data, see Understand and use integration data. Metric data This integration collects AWS Connect data for ContactFlow, Instance and Queue. Connect ContactFlow data Metric Unit Description callRecordingUploadError Count The number of call recordings that failed to upload to the Amazon S3 bucket configured for your instance. contactFlowErrors Count The number of times the error branch for a contact flow was executed. contactFlowFatalErrors Count The number of times a contact flow failed to execute due to a system error. misconfiguredPhoneNumbers Count The number of calls that failed because the phone number is not associated with a contact flow. publicSigningKeyUsage Count The number of times a contact flow security key (public signing key) was used to encrypt customer input in a contact flow. Connect Instance data Metric Unit Description callsBreachingConcurrencyQuota Count The total number of voice calls that exceeded the concurrent calls quota for the instance. callsPerInterval Count The number of voice calls, both inbound and outbound, received or placed per second in the instance. concurrentCalls Count The number of concurrent active voice calls in the instance. concurrentCallsPercentage Percent The percentage of the concurrent active voice calls service quota used in the instance. missedCalls Count The number of voice calls that were missed by agents during the refresh interval selected throttledCalls Count The number of voice calls that were rejected because the rate of calls per second exceeded the maximum supported quota. 
Connect Queue data Metric Unit Description callBackNotDialableNumber Count The number of times a queued callback to a customer could not be dialed because the customer's number is in a country for which outbound calls are not allowed for the instance. longestQueueWaitTime Seconds The longest amount of time, in seconds, that a contact waited in a queue. queueCapacityExceededError Count The number of calls that were rejected because the queue was full. queueSize Count The number of contacts in the queue.", + "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your AWS Transit Gateway data to New Relic. Here we explain how to activate the integration and what data it collects. Activate integration To enable this integration follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the AWS Transit Gateway integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select an integration. Data is attached to the following event type: Entity Event Type Provider TransitGateway AwsTransitGatewayTransitGatewaySample AwsTransitGatewayTransitGateway For more on how to use your data, see Understand and use integration data. Metric data This integration collects AWS Transit Gateway data for TransitGateway. Transit Gateway TransitGateway data Metric Unit Description bytesIn Count The number of bytes received by the transit gateway. bytesOut Count The number of bytes sent from the transit gateway. packetsIn Count The number of packets received by the transit gateway. packetsOut Count The number of packets sent by the transit gateway. packetDropCountBlackhole Count The number of packets dropped because they matched a blackhole route. packetDropCountNoRoute Count The number of packets dropped because they did not match a route. bytesDropCountBlackhole Count The number of bytes dropped because they matched a blackhole route. bytesDropCountNoRoute Count The number of bytes dropped because they did not match a route.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 169.90015, + "_score": 158.18307, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "AWS Connect monitoring integration", - "sections": "AWS Connect monitoring integration", + "title": "Amazon Transit Gateway monitoring integration", + "sections": "Amazon Transit Gateway monitoring integration", "tags": "AWS integrations list", "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. 
New Relic infrastructure integrations include an integration for reporting your AWS" }, - "id": "617da673e7b9d274a0c06009" + "id": "617da500196a676aaef7c7a0" } ], "/aws-api-gateway/b2fe368b-6e09-4f63-9c10-e888aa22a292": [ @@ -2936,7 +2925,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.59981, "_version": null, "_explanation": null, "sort": null, @@ -2977,7 +2966,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.5039, "_version": null, "_explanation": null, "sort": null, @@ -3018,7 +3007,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.50383, "_version": null, "_explanation": null, "sort": null, @@ -3059,7 +3048,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.50354, "_version": null, "_explanation": null, "sort": null, @@ -3099,7 +3088,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -3141,7 +3130,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -3181,7 +3170,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -3221,7 +3210,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -3261,7 +3250,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -3301,7 +3290,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -3346,7 +3335,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.02142, + "_score": 147.45116, "_version": null, "_explanation": null, "sort": null, @@ -3389,7 +3378,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 154.99384, + "_score": 144.44461, "_version": null, "_explanation": null, "sort": null, @@ -3401,47 +3390,6 @@ }, "id": "617da50164441f41c4fbf2ea" }, - { - "sections": [ - "AWS Elemental MediaConvert monitoring integration", - "Important", - "Activate the integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Elemental MediaConvert Queue data", - "Elemental MediaConvert Operation data" - ], - "title": "AWS Elemental MediaConvert monitoring integration", - "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": "4e3af1ef7b8a79842f689fde5561e79fa9acfbb0", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elemental-mediaconvert-monitoring-integration/", - "published_at": "2022-02-14T11:37:19Z", - "updated_at": "2022-02-14T11:37:19Z", - 
"document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic includes an integration for reporting your AWS Elemental MediaConvert data to our platform. Here we explain how to activate the integration and what data it collects. Activate the integration To enable this integration follow standard procedures to Connect AWS services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the AWS Elemental MediaConvert integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data in Infrastructure, go to one.newrelic.com > Infrastructure > AWS and select an integration. Data is attached to the following event type: Entity Event Type Provider Queue AwsMediaConvertQueueSample AwsMediaConvertQueue Operation AwsMediaConvertOperationSample AwsMediaConvertOperation For more on how to use your data, see Understand and use integration data. Metric data This integration collects AWS Elemental MediaConvert data for Queue and Operation. Elemental MediaConvert Queue data Metric Unit Description audioOutputDuration Seconds The number of seconds of audio-only output for a queue. sDOutputDuration Seconds The number of seconds of standard definition (SD) output for a queue. hDOutputDuration Seconds The number of seconds of high-definition (HD) output for a queue. uHDOutputDuration Seconds The number of seconds of ultra-high-definition (UHD) output for a queue. 8KOutputDuration Seconds The number of seconds of 8K output for a queue. jobsCompletedCount Count The number of jobs completed in this queue. jobsErroredCount Count The number of jobs that failed because of invalid inputs, such as a request to transcode a file that is not in the specified input bucket. standbyTime Seconds The number of seconds before AWS Elemental MediaConvert starts transcoding a job. transcodingTime Seconds The number of seconds for AWS Elemental MediaConvert to complete transcoding. Elemental MediaConvert Operation data Metric Unit Description errors Count errors", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 147.11035, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "AWS Elemental MediaConvert monitoring integration", - "sections": "AWS Elemental MediaConvert monitoring integration", - "tags": "AWS integrations list", - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. 
New Relic includes an integration for reporting your AWS Elemental MediaConvert data" - }, - "id": "617d6cbe64441f6988fbd4e7" - }, { "sections": [ "AWS Kinesis Data Streams", @@ -3469,7 +3417,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -3509,7 +3457,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -3521,6 +3469,46 @@ "body": "What's included AWS Health installation docs Monitor AWS Health by connecting AWS to New Relic. Doc What is AWS Health? Oversight and alerts for how AWS outages and maintenance may affect your services. Get started! Start monitoring AWS Health by connecting Amazon Web Services (AWS) to New Relic" }, "id": "61566912196a679a39b70d4f" + }, + { + "sections": [ + "AWS EMR", + "What's included", + "AWS EMR installation docs", + "What is AWS EMR?", + "Get started!", + "More info" + ], + "title": "AWS EMR", + "type": "quickstarts", + "tags": [ + "aws", + "amazon web services" + ], + "quick_start_name": "AWS EMR", + "external_id": "239ca030e9a6e12b688167c0a2c68c35a9f081ca", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/aws-emr/d84051ba-365f-4542-8db9-0829384ea55a/", + "published_at": "2022-02-07T01:45:29Z", + "updated_at": "2021-10-06T14:00:38Z", + "document_type": "page", + "popularity": 1, + "body": "What's included AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Doc What is AWS EMR? Process and manage big data inputs from popular frameworks. Get started! Start monitoring AWS EMR by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS EMR documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS EMR. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS EMR observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 141.20995, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "AWS EMR", + "sections": "AWS EMR", + "tags": "amazon web services", + "quick_start_name": "AWS EMR", + "body": "What's included AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Doc What is AWS EMR? Process and manage big data inputs from popular frameworks. Get started! Start monitoring AWS EMR by connecting Amazon Web Services (AWS) to New Relic! 
Check out our AWS EMR documentation" + }, + "id": "61566b7b64441f603909962d" } ], "/aws-lambda/6908e6c3-fed5-4c6c-a3fc-f85e467cefa1": [ @@ -3550,7 +3538,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.14026, + "_score": 144.5479, "_version": null, "_explanation": null, "sort": null, @@ -3589,7 +3577,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -3629,7 +3617,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -3669,7 +3657,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -3709,7 +3697,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.2099, "_version": null, "_explanation": null, "sort": null, @@ -3752,7 +3740,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -3793,7 +3781,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -3834,7 +3822,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -3875,7 +3863,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -3916,7 +3904,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -3958,7 +3946,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -3998,7 +3986,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -4038,7 +4026,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -4078,7 +4066,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -4118,7 +4106,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -4164,7 +4152,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61313, "_version": null, "_explanation": null, "sort": null, @@ -4213,7 +4201,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.2801, "_version": null, "_explanation": null, "sort": null, @@ -4261,7 +4249,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27109, "_version": null, "_explanation": null, "sort": null, @@ -4303,7 +4291,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.26717, "_version": null, "_explanation": null, "sort": null, @@ -4346,7 +4334,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24292, "_version": null, "_explanation": null, "sort": null, @@ -4388,7 +4376,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -4429,7 +4417,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -4470,7 +4458,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -4511,7 +4499,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -4551,7 +4539,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -4594,7 +4582,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -4635,7 +4623,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -4676,7 +4664,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -4717,7 +4705,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -4758,7 +4746,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -4802,7 +4790,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 120.249985, + "_score": 113.49166, "_version": null, "_explanation": null, "sort": null, @@ -4851,7 +4839,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.977295, + "_score": 80.729095, "_version": null, "_explanation": null, "sort": null, @@ -4892,7 +4880,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61224, "_version": null, "_explanation": null, "sort": null, @@ -4941,7 +4929,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.27921, "_version": null, "_explanation": null, "sort": null, @@ -4989,7 +4977,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.2702, "_version": null, "_explanation": null, "sort": null, @@ -5038,7 +5026,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 160.212, + "_score": 152.05011, "_version": null, "_explanation": null, "sort": null, @@ -5049,6 +5037,50 @@ }, "id": "6107859064441f8baf47abd9" }, + { + "sections": [ + "Understand and manage data ingest", + "Data ingestion UI", + "Data ingestion sources", + "Understand where data is coming from", + "How ingested data is broken down", + "Set alerts for data use", + "Adjust your data ingest", + "Drop unwanted data", + "Disable agents and integrations", + "Adjust APM data ingest", + "Adjust infrastructure data ingest", + "Adjust log data ingest" + ], + "title": "Understand and manage data ingest", + "type": "docs", + "tags": [ + "Manage data", + "Ingest and manage data" + ], + "external_id": "f1c46558041c874a2076f781fa975a21105f60e4", + "image": "https://docs.newrelic.com/static/82d3c36157005ac0efe40cd6e10fe06b/b23ad/data-facet.png", + "url": "https://docs.newrelic.com/docs/data-apis/manage-data/manage-data-coming-new-relic/", + "published_at": "2022-02-14T02:25:03Z", + "updated_at": "2022-02-04T12:05:12Z", + "document_type": "page", + "popularity": 1, + "body": "When you report data to New Relic, we process what we receive and apply data dropping and transformation rules. Then we count the bytes needed to represent your data in a standard format, like JSON. If you're on our New Relic One pricing model, you're charged for the number of bytes written to our database that are above and beyond the free per-month amount. If you're trying to estimate the cost of your data ingest, see Estimate data ingest. Data ingestion UI To learn how to easily analyze the data your account is ingesting, watch this short video (3:18 minutes). The Data ingestion tab is located in the Data management UI. The Data ingestion UI shows your ingest rates for the time period specified by the time picker in the upper right. The page shows your daily average GBs, and the total GBs for that time range. You can toggle between an overall ingest view and an account view to identify which of your accounts are sending the most data. The page also provides the current month-to-date, and the projected end-of-month total ingest rates. With this information, you can proactively manage your data ingest in various ways. To see the underlying NRQL query that is used to generate the chart, click View query. From the account dropdown, select Manage your data, and then select Data ingestion. For how to get more details about ingested data, see Get ingest details. Data ingestion sources The data ingestion UI chart shows you a high level breakdown of your billable data usage. The table below explains those sources. In this table, \"usage metric group\" refers to the value of that source's usageMetric attribute value on the NrConsumption event. Data sources Description Metrics In the data ingestion chart, Metrics is a combination of two types of metrics: metric timeslice data and dimensional metrics. Usage metric group: MetricsBytes. Metric timeslice data averages to one-hour periods after eight days. After 90 days, the permanent metric data continues to be stored in one-hour periods. 
We currently store the raw metric data for 30 days. You are only billed for the initial ingest volume. You are not billed for subsequent rollups. APM This includes APM events, like Transaction and TransactionError. Usage metric group: ApmEventsBytes. Infrastructure Includes several categories of infrastructure monitoring events, described below. Infrastructure host data. Usage metric group: InfraHostBytes. Information related to your servers and virtual machines coming from infrastructure agents, including storage and network data. Infrastructure process data stored in ProcessSample. Usage metric group: InfraProcessBytes. Data related to each process running on the hosts running the infrastructure agent. This feature is turned off by default. For more information, see Process metrics. Infrastructure integrations. Usage metric group: InfraIntegrationBytes. Performance data related to applications and services, typically managed by the customer, including data related to Docker containers, Windows services, Nagios checks, and cloud integrations such as managed services in AWS, Azure, and GCP. Logging Includes logs and any Log_<value> custom data partition created. Usage metric group: LoggingBytes. Log records are stored on the Log data type by default. Additional custom data partitions will create new data types, which are always prefixed with Log_ and are counted as part of the overall set of log data stored. With LogExtendedRecord, log messages longer than 4KB are split into multiple events that, when needed, are stitched together to display the original message; this reduces the size of message data. As of September 2021, log storage as blobs replaces LogExtendedRecord. With blob storage, NRDB can store up to 128,000 bytes for any attribute, not just messages. For more information, see our log blobs docs. Default Custom events. Usage metric group: CustomEventsBytes Mobile events Mobile events, including the general Mobile event, MobileRequestError, MobileBreadcrumb, MobileSession, MobileHandledException, MobileCrash. Usage metric group: MobileEventsBytes. Tracing Usage metric group: TracingBytes. This includes the Span data type and OpenTelemetry's SpanEvent. You are not charged for DistributedTraceSummary events. Browser events Browser events, including the namespaces of Browser, Browser:EventLog, Browser:JSErrors, and PcvPerf (PageView timing). Usage metric group: BrowserEventsBytes. Lambda AWS Lambda events. Usage metric group: ServerlessBytes. Understand where data is coming from You can inspect your data ingest to gain more information about your ingest health. From the data ingestion UI page, you can analyze your usage in more detail. Spending some time understanding your ingested data and where it comes from and how it changes over time can be valuable. You'll know your ingest baselines, and you'll be able to more easily spot anomalies, like ingest spikes, and understand their source. On the data ingestion chart, time is on the X axis and the bands representing data sources are located along the Y axis. Click on a data source band you want to inspect at the spot in the X axis that corresponds with the date you want to investigate. This image shows the data source band for June 15 right before it's clicked. A modal opens with the account, data source, and facet selected. You can do a handful of things on this page: Change the account, data source, or facet you want to drill down into. Change the time range. Review the results of the query in chart form. 
The chart displays the top 15 results for the facet query. Open the NRQL query in the Query builder where you'll find additional facets that you can use. For more about creating more detailed queries: Learn some NRQL basics. See some example usage-related queries. How ingested data is broken down Some of the usage data in this UI can vary depending on your account. This information is intended to help you understand how we're working with your ingest data: The chart on the Data ingestion page shows data usage for a little longer time frame than that covered by your retention settings for each data ingest source. If you choose a date outside of your designated retention period for an ingest source, you'll get the message that there's no chart data available. Select a more recent date to fix this problem. If you inspect a data source for an account that has less than a terabyte of data, we compute the ingest volume over a 24-hour period; otherwise, we compute it for a one-hour period. The ingest value provided on the main Data ingestion chart will be slightly different from that reflected during inspection. This is because our facet computation is an estimate. Set alerts for data use For how to set alerts that will notify you when you're reaching data ingest levels you don't want to cross, see Query and alert on usage data. For example, you might set an alert on logs, which can accumulate quickly in an active system. Adjust your data ingest Here are some ideas for managing your data: Drop unwanted data On ingest, we apply data dropping rules so you won't be charged for data that's not useful. Learn how to set additional data dropping rules yourself. For how to drop log data, see Drop log data. Disable agents and integrations If you have agents or integrations that you don't need at all, you can uninstall/delete those tools. For instructions, see the specific docs for that tool. Adjust APM data ingest Options for adjusting APM data include: Configure the sampling rate for transaction events. Set appropriate Apdex scores, for example, for frequency of traces. Optimize custom instrumentation and/or custom metrics. Adjust infrastructure data ingest Options for adjusting infrastructure data include: Adjust sampling rate for network, storage, and system events. Disable process metrics. Adjust polling intervals: Polling for cloud integrations. For on-host integrations: edit the configuration file for a specific integration. Control the reporting of specific attributes. Manage Kubernetes events integration. Adjust log data ingest Options for adjusting log data ingest include: Use the log forwarder to filter log events on the sending side. Drop log data, either via the UI or with NerdGraph.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 139.61166, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Understand and manage data ingest", + "sections": "Adjust log data ingest", + "tags": "Ingest and manage data", + "body": " containers, Windows services, Nagios checks, and cloud integrations such as managed services in AWS, Azure, and GCP. Logging Includes logs and any Log_<value> custom data partition created. Usage metric group: LoggingBytes. Log records are stored on the Log data type by default. 
Additional custom" + }, + "id": "603e978228ccbc8984eba79e" + }, { "sections": [ "Forward logs from Google Cloud Platform", @@ -5091,7 +5123,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.78674, + "_score": 137.88327, "_version": null, "_explanation": null, "sort": null, @@ -5103,50 +5135,6 @@ }, "id": "61740cdc28ccbc5833c6a8c2" }, - { - "sections": [ - "Understand and manage data ingest", - "Data ingestion UI", - "Data ingestion sources", - "Understand where data is coming from", - "How ingested data is broken down", - "Set alerts for data use", - "Adjust your data ingest", - "Drop unwanted data", - "Disable agents and integrations", - "Adjust APM data ingest", - "Adjust infrastructure data ingest", - "Adjust log data ingest" - ], - "title": "Understand and manage data ingest", - "type": "docs", - "tags": [ - "Manage data", - "Ingest and manage data" - ], - "external_id": "f1c46558041c874a2076f781fa975a21105f60e4", - "image": "https://docs.newrelic.com/static/82d3c36157005ac0efe40cd6e10fe06b/b23ad/data-facet.png", - "url": "https://docs.newrelic.com/docs/data-apis/manage-data/manage-data-coming-new-relic/", - "published_at": "2022-02-14T02:25:03Z", - "updated_at": "2022-02-04T12:05:12Z", - "document_type": "page", - "popularity": 1, - "body": "When you report data to New Relic, we process what we receive and apply data dropping and transformation rules. Then we count the bytes needed to represent your data in a standard format, like JSON. If you're on our New Relic One pricing model, you're charged for the number of bytes written to our database that are above and beyond the free per-month amount. If you're trying to estimate the cost of your data ingest, see Estimate data ingest. Data ingestion UI To learn how to easily analyze the data your account is ingesting, watch this short video (3:18 minutes). The Data ingestion tab is located in the Data management UI. The Data ingestion UI shows your ingest rates for the time period specified by the time picker in the upper right. The page shows your daily average GBs, and the total GBs for that time range. You can toggle between an overall ingest view and an account view to identify which of your accounts are sending the most data. The page also provides the current month-to-date, and the projected end-of-month total ingest rates. With this information, you can proactively manage your data ingest in various ways. To see the underlying NRQL query that is used to generate the chart, click View query. From the account dropdown, select Manage your data, and then select Data ingestion. For how to get more details about ingested data, see Get ingest details. Data ingestion sources The data ingestion UI chart shows you a high level breakdown of your billable data usage. The table below explains those sources. In this table, \"usage metric group\" refers to the value of that source's usageMetric attribute value on the NrConsumption event. Data sources Description Metrics In the data ingestion chart, Metrics is a combination of two types of metrics: metric timeslice data and dimensional metrics. Usage metric group: MetricsBytes. Metric timeslice data averages to one-hour periods after eight days. After 90 days, the permanent metric data continues to be stored in one-hour periods. We currently store the raw metric data for 30 days. You are only billed for the initial ingest volume. You are not billed for subsequent rollups. APM This includes APM events, like Transaction and TransactionError. 
Usage metric group: ApmEventsBytes. Infrastructure Includes several categories of infrastructure monitoring events, described below. Infrastructure host data. Usage metric group:InfraHostBytes. Information related to your servers and virtual machines coming from infrastructure agents, including storage and network data. Infrastructure process data stored in ProcessSample. Usage metric group: InfraProcessBytes. Data related to each process running on the hosts running the infrastructure agent. This feature is turned off by default. For more information, see Process metrics. Infrastructure integrations. Usage metric group: InfraIntegrationBytes. Performance data related to applications and services, typically managed by the customer, including data related to Docker containers, Windows services, Nagios checks, and cloud integrations such as managed services in AWS, Azure, and GCP. Logging Includes logs and any Log_ custom data partition created. Usage metric group: LoggingBytes. Log records are stored on the Log data type by default. Additional custom data partitions will create new data types, which are always prefixed with Log_ and are counted as part of the overall set of log data stored. With LogExtendedRecord, log messages longer than 4KB are split into multiple events that, when needed, are stitched together to display the original message; this reduces the size of message data. As of September 2021, log storage as blobs replaces LogExtendedRecord. With blob storage, NRDB can store up to 128,000 bytes for any attribute, not just messages. For more information, see our log blobs docs. Default Custom events. Usage metric group: CustomEventsBytes Mobile events Mobile events, including the general Mobile event, MobileRequestError, MobileBreadcrumb, MobileSession, MobileHandledException, MobileCrash. Usage metric group: MobileEventsBytes. Tracing Usage metric group: TracingBytes. This includes the Span data type and OpenTelemetry's SpanEvent. You are not charged for DistributedTraceSummary events. Browser events Browser events, including the namespaces of Browser, Browser:EventLog, Browser:JSErrors, and PcvPerf (PageView timing). Usage metric group: BrowserEventsBytes. Lambda AWS Lambda events. Usage metric group: ServerlessBytes. Understand where data is coming from You can inspect your data ingest to gain more information about your ingest health. From the data ingestion UI page, you can analyze your usage in more detail. Spending some time understanding your ingested data and where it comes from and how it changes over time can be valuable. You'll know your ingest baselines, and you'll be able to more easily spot anomalies, like ingest spikes, and understand their source. On the data ingestion chart, time is on the X axis and the bands representing data sources are located along the Y axis. Click on a data source band you want to inspect at the spot in the X axis that corresponds with the date you want to investigate. This image shows the data source band for June 15 right before it's clicked. A modal opens with the account, data source, and facet selected. You can do a handful of things on this page: Change the account, data source, or facet you want to drill down into. Change the time range. Review the results of the query in chart form. The chart displays the top 15 results for the facet query. Open the NRQL query in the Query builder where you'll find additional facets that you can use. For more about creating more detailed queries: Learn some NRQL basics. 
See some example usage-related queries. How ingested data is broken down Some of the usage data in this UI can vary depending on your account. This information is intended to help you understand how we're working with your ingest data: The chart on the Data ingestion page shows data usage for a slightly longer time frame than the one covered by your retention settings for each data ingest source. If you choose a date outside of your designated retention period for an ingest source, you'll get the message that there's no chart data available. Select a more recent date to fix this problem. If you inspect a data source for an account that has less than a terabyte of data, we compute the ingest volume over a 24-hour period; otherwise, we compute it over a one-hour period. The ingest value provided on the main Data ingestion chart will be slightly different from that reflected during inspection. This is because our facet computation is an estimate. Set alerts for data use For how to set alerts that will notify you when you're reaching data ingest levels you don't want to cross, see Query and alert on usage data. For example, you might set an alert on logs, which can accumulate quickly in an active system. Adjust your data ingest Here are some ideas for managing your data: Drop unwanted data On ingest, we apply data dropping rules so you won't be charged for data that's not useful. Learn how to set additional data dropping rules yourself. For how to drop log data, see Drop log data. Disable agents and integrations If you have agents or integrations that you don't need at all, you can uninstall/delete those tools. For instructions, see the specific docs for that tool. Adjust APM data ingest Options for adjusting APM data include: Configure the sampling rate for transaction events. Set appropriate Apdex scores, for example, for frequency of traces. Optimize custom instrumentation and/or custom metrics. Adjust infrastructure data ingest Options for adjusting infrastructure data include: Adjust sampling rate for network, storage, and system events. Disable process metrics. Adjust polling intervals: Polling for cloud integrations. For on-host integrations: edit the configuration file for a specific integration. Control the reporting of specific attributes. Manage Kubernetes events integration. Adjust log data ingest Options for adjusting log data ingest include: Use the log forwarder to filter log events on the sending side. Drop log data, either via the UI or with NerdGraph.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 142.6196, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Understand and manage data ingest", - "sections": "Adjust log data ingest", - "tags": "Ingest and manage data", - "body": " containers, Windows services, Nagios checks, and cloud integrations such as managed services in AWS, Azure, and GCP. Logging Includes logs and any Log_<value> custom data partition created. Usage metric group: LoggingBytes. Log records are stored on the Log data type by default. 
Additional custom" - }, - "id": "603e978228ccbc8984eba79e" - }, { "sections": [ "New Relic's data management hub", @@ -5174,7 +5162,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 137.46387, + "_score": 134.56442, "_version": null, "_explanation": null, "sort": null, @@ -5221,7 +5209,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 135.67694, + "_score": 128.91484, "_version": null, "_explanation": null, "sort": null, @@ -5263,7 +5251,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 496.72217, + "_score": 469.76855, "_version": null, "_explanation": null, "sort": null, @@ -5320,7 +5308,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 488.7296, + "_score": 462.98602, "_version": null, "_explanation": null, "sort": null, @@ -5361,7 +5349,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 328.72095, + "_score": 308.13437, "_version": null, "_explanation": null, "sort": null, @@ -5398,7 +5386,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 313.41904, + "_score": 295.99622, "_version": null, "_explanation": null, "sort": null, @@ -5460,7 +5448,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 166.43884, + "_score": 165.55818, "_version": null, "_explanation": null, "sort": null, @@ -5506,7 +5494,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 103.48776, + "_score": 96.60191, "_version": null, "_explanation": null, "sort": null, @@ -5680,7 +5668,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 99.52762, + "_score": 93.745605, "_version": null, "_explanation": null, "sort": null, @@ -5720,7 +5708,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 99.31943, + "_score": 92.71203, "_version": null, "_explanation": null, "sort": null, @@ -5764,7 +5752,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.18211, + "_score": 90.59553, "_version": null, "_explanation": null, "sort": null, @@ -5805,7 +5793,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 91.7493, + "_score": 85.80809, "_version": null, "_explanation": null, "sort": null, @@ -5847,7 +5835,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 73.19882, + "_score": 68.4103, "_version": null, "_explanation": null, "sort": null, @@ -5888,7 +5876,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 72.23144, + "_score": 67.64889, "_version": null, "_explanation": null, "sort": null, @@ -6063,7 +6051,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 68.924126, + "_score": 65.015686, "_version": null, "_explanation": null, "sort": null, @@ -6107,7 +6095,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.939064, + "_score": 63.41249, "_version": null, "_explanation": null, "sort": null, @@ -6158,7 +6146,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - 
"_score": 59.409058, + "_score": 55.499584, "_version": null, "_explanation": null, "sort": null, @@ -6206,7 +6194,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.41478, + "_score": 109.80813, "_version": null, "_explanation": null, "sort": null, @@ -6249,7 +6237,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61224, "_version": null, "_explanation": null, "sort": null, @@ -6298,7 +6286,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.27921, "_version": null, "_explanation": null, "sort": null, @@ -6346,7 +6334,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.2702, "_version": null, "_explanation": null, "sort": null, @@ -6388,7 +6376,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.26629, "_version": null, "_explanation": null, "sort": null, @@ -6430,7 +6418,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -6471,7 +6459,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -6512,7 +6500,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -6553,7 +6541,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -6594,7 +6582,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -6638,7 +6626,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 128.11615, + "_score": 121.35664, "_version": null, "_explanation": null, "sort": null, @@ -6685,7 +6673,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 104.747665, + "_score": 98.93043, "_version": null, "_explanation": null, "sort": null, @@ -6720,7 +6708,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 98.664474, + "_score": 93.16171, "_version": null, "_explanation": null, "sort": null, @@ -6761,7 +6749,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.60044, + "_score": 82.6869, "_version": null, "_explanation": null, "sort": null, @@ -6804,7 +6792,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61319, "_version": null, "_explanation": null, "sort": null, @@ -6846,7 +6834,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -6887,7 +6875,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, 
+ "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -6928,7 +6916,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -6969,7 +6957,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -7010,7 +6998,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -7053,7 +7041,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -7094,7 +7082,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -7135,7 +7123,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -7176,7 +7164,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -7216,7 +7204,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7258,7 +7246,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7298,7 +7286,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7338,7 +7326,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7378,7 +7366,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -7418,7 +7406,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -7461,7 +7449,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -7502,7 +7490,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -7543,7 +7531,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -7584,7 +7572,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + 
"_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -7624,7 +7612,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7667,7 +7655,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -7708,7 +7696,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -7749,7 +7737,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -7790,7 +7778,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -7831,7 +7819,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -7873,7 +7861,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7913,7 +7901,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7953,7 +7941,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -7993,7 +7981,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -8033,7 +8021,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -8077,7 +8065,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.11386, + "_score": 184.13321, "_version": null, "_explanation": null, "sort": null, @@ -8115,7 +8103,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 191.00505, + "_score": 181.24066, "_version": null, "_explanation": null, "sort": null, @@ -8162,7 +8150,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 184.99162, + "_score": 175.80434, "_version": null, "_explanation": null, "sort": null, @@ -8213,7 +8201,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.3023, + "_score": 168.52115, "_version": null, "_explanation": null, "sort": null, @@ -8251,7 +8239,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 166.25168, + "_score": 154.38867, "_version": null, "_explanation": null, "sort": null, @@ -8292,7 +8280,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + 
"_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -8332,7 +8320,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -8372,7 +8360,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -8412,7 +8400,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -8452,7 +8440,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -8497,7 +8485,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.02142, + "_score": 147.45116, "_version": null, "_explanation": null, "sort": null, @@ -8540,7 +8528,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 154.99384, + "_score": 144.44461, "_version": null, "_explanation": null, "sort": null, @@ -8552,47 +8540,6 @@ }, "id": "617da50164441f41c4fbf2ea" }, - { - "sections": [ - "AWS Elemental MediaConvert monitoring integration", - "Important", - "Activate the integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Elemental MediaConvert Queue data", - "Elemental MediaConvert Operation data" - ], - "title": "AWS Elemental MediaConvert monitoring integration", - "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": "4e3af1ef7b8a79842f689fde5561e79fa9acfbb0", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elemental-mediaconvert-monitoring-integration/", - "published_at": "2022-02-14T11:37:19Z", - "updated_at": "2022-02-14T11:37:19Z", - "document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic includes an integration for reporting your AWS Elemental MediaConvert data to our platform. Here we explain how to activate the integration and what data it collects. Activate the integration To enable this integration follow standard procedures to Connect AWS services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the AWS Elemental MediaConvert integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data in Infrastructure, go to one.newrelic.com > Infrastructure > AWS and select an integration. Data is attached to the following event type: Entity Event Type Provider Queue AwsMediaConvertQueueSample AwsMediaConvertQueue Operation AwsMediaConvertOperationSample AwsMediaConvertOperation For more on how to use your data, see Understand and use integration data. Metric data This integration collects AWS Elemental MediaConvert data for Queue and Operation. 
Elemental MediaConvert Queue data Metric Unit Description audioOutputDuration Seconds The number of seconds of audio-only output for a queue. sDOutputDuration Seconds The number of seconds of standard definition (SD) output for a queue. hDOutputDuration Seconds The number of seconds of high-definition (HD) output for a queue. uHDOutputDuration Seconds The number of seconds of ultra-high-definition (UHD) output for a queue. 8KOutputDuration Seconds The number of seconds of 8K output for a queue. jobsCompletedCount Count The number of jobs completed in this queue. jobsErroredCount Count The number of jobs that failed because of invalid inputs, such as a request to transcode a file that is not in the specified input bucket. standbyTime Seconds The number of seconds before AWS Elemental MediaConvert starts transcoding a job. transcodingTime Seconds The number of seconds for AWS Elemental MediaConvert to complete transcoding. Elemental MediaConvert Operation data Metric Unit Description errors Count errors", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 147.11035, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "AWS Elemental MediaConvert monitoring integration", - "sections": "AWS Elemental MediaConvert monitoring integration", - "tags": "AWS integrations list", - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic includes an integration for reporting your AWS Elemental MediaConvert data" - }, - "id": "617d6cbe64441f6988fbd4e7" - }, { "sections": [ "AWS Kinesis Data Streams", @@ -8620,7 +8567,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -8660,7 +8607,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -8672,6 +8619,46 @@ "body": "What's included AWS Health installation docs Monitor AWS Health by connecting AWS to New Relic. Doc What is AWS Health? Oversight and alerts for how AWS outages and maintenance may affect your services. Get started! Start monitoring AWS Health by connecting Amazon Web Services (AWS) to New Relic" }, "id": "61566912196a679a39b70d4f" + }, + { + "sections": [ + "AWS EMR", + "What's included", + "AWS EMR installation docs", + "What is AWS EMR?", + "Get started!", + "More info" + ], + "title": "AWS EMR", + "type": "quickstarts", + "tags": [ + "aws", + "amazon web services" + ], + "quick_start_name": "AWS EMR", + "external_id": "239ca030e9a6e12b688167c0a2c68c35a9f081ca", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/aws-emr/d84051ba-365f-4542-8db9-0829384ea55a/", + "published_at": "2022-02-07T01:45:29Z", + "updated_at": "2021-10-06T14:00:38Z", + "document_type": "page", + "popularity": 1, + "body": "What's included AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Doc What is AWS EMR? Process and manage big data inputs from popular frameworks. Get started! Start monitoring AWS EMR by connecting Amazon Web Services (AWS) to New Relic! 
Check out our AWS EMR documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS EMR. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS EMR observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 141.20995, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "AWS EMR", + "sections": "AWS EMR", + "tags": "amazon web services", + "quick_start_name": "AWS EMR", + "body": "What's included AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Doc What is AWS EMR? Process and manage big data inputs from popular frameworks. Get started! Start monitoring AWS EMR by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS EMR documentation" + }, + "id": "61566b7b64441f603909962d" } ], "/ansible/c377d781-da77-4dd6-b5a8-6a25a39e2582": [ @@ -8682,31 +8669,31 @@ "Automate workflows", "Guides to automate workflows", "Quickly tag resources", - "Set up New Relic using the Kubernetes operator", "Automate common tasks", + "Set up New Relic using the Kubernetes operator", "Automatically tag a simple \"Hello World\" Demo across the entire stack", "Getting started with New Relic and Terraform", "Set up New Relic using Helm charts" ], - "published_at": "2022-02-15T01:37:23Z", + "published_at": "2022-02-16T01:38:14Z", "title": "Automate workflows", - "updated_at": "2022-02-15T01:37:23Z", + "updated_at": "2022-02-16T01:38:14Z", "type": "developer", "external_id": "d4f408f077ed950dc359ad44829e9cfbd2ca4871", "document_type": "page", "popularity": 1, - "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 
30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", + "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.14711, + "_score": 94.86789, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "Automate workflows", "sections": "Automate workflows", - "body": " deployment markers 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" + "body": " the Kubernetes operator 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 
30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" }, "id": "6091f7c8e7b9d2f6715068f1" }, @@ -8731,7 +8718,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.15608, + "_score": 94.278885, "_version": null, "_explanation": null, "sort": null, @@ -8777,7 +8764,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.503174, + "_score": 81.99892, "_version": null, "_explanation": null, "sort": null, @@ -8811,7 +8798,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.496025, + "_score": 81.99311, "_version": null, "_explanation": null, "sort": null, @@ -8856,7 +8843,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.49205, + "_score": 81.98988, "_version": null, "_explanation": null, "sort": null, @@ -8895,7 +8882,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -8936,7 +8923,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -8977,7 +8964,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -9018,7 +9005,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -9059,7 +9046,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -9101,7 +9088,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9141,7 +9128,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9181,7 +9168,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9221,7 +9208,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -9261,7 +9248,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -9303,7 +9290,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9343,7 +9330,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, 
"_explanation": null, "sort": null, @@ -9383,7 +9370,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9398,83 +9385,83 @@ }, { "sections": [ - "AWS Billing", + "AWS Auto Scaling", "What's included", - "AWS Billing installation docs", - "What is AWS Billing?", + "AWS Auto Scaling installation docs", + "What is AWS Auto Scaling?", "Get started!", "More info" ], - "title": "AWS Billing", + "title": "AWS Auto Scaling", "type": "quickstarts", "tags": [ "aws", "amazon web services" ], - "quick_start_name": "AWS Billing", - "external_id": "748c52cca409971c66b5151e152f477fd124606a", + "quick_start_name": "AWS Auto Scaling", + "external_id": "80047bd0bf951243f585ba8b0982f0aa92c39810", "image": "", - "url": "https://developer.newrelic.com/instant-observability/aws-billing/c7f15a54-e243-4e23-95dd-ff08937041ed/", - "published_at": "2022-02-04T02:03:04Z", - "updated_at": "2021-10-06T13:59:37Z", + "url": "https://developer.newrelic.com/instant-observability/aws-auto-scaling/e6a1654d-2e00-4956-a2da-39f1f5f5f5e9/", + "published_at": "2022-02-10T01:40:38Z", + "updated_at": "2021-10-06T13:59:42Z", "document_type": "page", "popularity": 1, - "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Billing documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Billing. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Billing observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Docs", + "body": "What's included AWS Auto Scaling installation docs Monitor AWS Auto Scaling by connecting AWS to New Relic. Doc What is AWS Auto Scaling? Launch or terminate EC2 instances automatically, adapting capacity based on user-defined policies, schedules, and health checks. Get started! Start monitoring AWS Auto Scaling by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Auto Scaling documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Auto Scaling. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. 
View repo AWS Auto Scaling observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Auto Scaling installation docs Monitor AWS Auto Scaling by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "AWS Billing", - "sections": "AWS Billing", + "title": "AWS Auto Scaling", + "sections": "AWS Auto Scaling", "tags": "amazon web services", - "quick_start_name": "AWS Billing", - "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web" + "quick_start_name": "AWS Auto Scaling", + "body": " AWS Auto Scaling by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Auto Scaling documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out" }, - "id": "61566b7be7b9d2c9178de374" + "id": "61566b7b28ccbcfa59f2145d" }, { "sections": [ - "AWS Elastic Beanstalk", + "AWS Billing", "What's included", - "AWS Elastic Beanstalk installation docs", - "What is AWS Elastic Beanstalk?", + "AWS Billing installation docs", + "What is AWS Billing?", "Get started!", "More info" ], - "title": "AWS Elastic Beanstalk", + "title": "AWS Billing", "type": "quickstarts", "tags": [ "aws", "amazon web services" ], - "quick_start_name": "AWS Elastic Beanstalk", - "external_id": "5f1d325b5bea77e6f550976224e344225a073f7b", + "quick_start_name": "AWS Billing", + "external_id": "748c52cca409971c66b5151e152f477fd124606a", "image": "", - "url": "https://developer.newrelic.com/instant-observability/aws-elastic-beanstalk/b232f332-e27a-4d34-a966-e7f678d35145/", - "published_at": "2022-02-07T01:45:28Z", + "url": "https://developer.newrelic.com/instant-observability/aws-billing/c7f15a54-e243-4e23-95dd-ff08937041ed/", + "published_at": "2022-02-04T02:03:04Z", "updated_at": "2021-10-06T13:59:37Z", "document_type": "page", "popularity": 1, - "body": "What's included AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Doc What is AWS Elastic Beanstalk? Dynamic service that allows easy deployment and scalability for your applications on AWS. Get started! Start monitoring AWS Elastic Beanstalk by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Elastic Beanstalk. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Elastic Beanstalk observability quickstart contains 1 data source . This is how you'll get your data into New Relic. 
AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Docs", + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Billing documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Billing. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Billing observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20692, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "AWS Elastic Beanstalk", - "sections": "AWS Elastic Beanstalk", + "title": "AWS Billing", + "sections": "AWS Billing", "tags": "amazon web services", - "quick_start_name": "AWS Elastic Beanstalk", - "body": " by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation" + "quick_start_name": "AWS Billing", + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! 
Start monitoring AWS Billing by connecting Amazon Web" }, - "id": "61566912196a678b22b70db3" + "id": "61566b7be7b9d2c9178de374" } ], "/aws-iot/1ad84671-9c86-464e-aca0-87110057541c": [ @@ -9505,7 +9492,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9545,7 +9532,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9585,7 +9572,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -9625,7 +9612,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -9665,7 +9652,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -9708,7 +9695,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -9749,7 +9736,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -9790,7 +9777,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -9831,7 +9818,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -9872,7 +9859,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -9918,7 +9905,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61313, "_version": null, "_explanation": null, "sort": null, @@ -9967,7 +9954,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.2801, "_version": null, "_explanation": null, "sort": null, @@ -10015,7 +10002,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27109, "_version": null, "_explanation": null, "sort": null, @@ -10057,7 +10044,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.26717, "_version": null, "_explanation": null, "sort": null, @@ -10100,7 +10087,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24292, "_version": null, "_explanation": null, "sort": null, @@ -10114,54 +10101,6 @@ } ], "/aws-ecs-ecr/7360ba74-1822-474e-be8e-4636dd22e7ca": [ - { - "sections": [ - "Amazon ECS/ECR monitoring integration", - "Important", - "Features", - "Activate integration", - "Configuration and polling", - "Find 
and use data", - "Metric data", - "Tip", - "Cluster and service metrics", - "Cluster metrics", - "Service metrics", - "Inventory data", - "EOL NOTICE", - "aws/ecs/cluster", - "aws/ecs/service" - ], - "title": "Amazon ECS/ECR monitoring integration", - "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": "58ee15de138543031f8b39f407369a50a15758b8", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-ecsecr-monitoring-integration/", - "published_at": "2022-02-15T16:47:33Z", - "updated_at": "2022-02-15T16:47:32Z", - "document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your Amazon EC2 Container Service (ECS) data and your Amazon EC2 Container Registry (ECR) data to New Relic products. This document explains the integration's features, how to activate it, and what data can be reported. Important New Relic also offers an ECS on-host integration, which reports a different data set than this cloud integration. For complete ECS monitoring, we recommend enabling both integrations. Features With New Relic's ECS/ECR monitoring integration, you can monitor reserved vs. utilized capacity, task execution, and registry of containers. AWS integration data is also available for analysis and chart creation in New Relic One. Activate integration To enable this integration, follow standard procedures to Connect AWS services to New Relic. If you have services running on ECS, you can also enable monitoring of those services. Important In AWS, you have to opt-in for the new ARN format (announcement) to differentiate services with the same name in different clusters. If not, you could have data collision Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon ECS/ECR integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute or 5 minutes Find and use data To find this integration's data, go to one.newrelic.com > Infrastructure > AWS and select one of the Amazon ECS/ECR integration links. You can query and explore your data using the ComputeSample event type. Use a provider value of EcsCluster for cluster data, or a provider value of EcsService for cluster-service data. The integration collects these ECR/ECS definitions: Name Description ClusterName This dimension filters the data you request for all resources in a specified cluster. All Amazon ECS metrics are filtered by ClusterName. ServiceName This dimension filters the data you request for all resources in a specified service within a specified cluster. For more on how to find and use integration data, see Understand integration data. Metric data The ECS/ECR integration collects the following data: Tip For full descriptions of these metrics, see Amazon's documentation on ECS and ECR. 
Cluster and service metrics Name Data type CPUUtilization percent MemoryUtilization percent Cluster metrics Name Data type CPUReservation percent MemoryReservation percent Service metrics Name Description Active Service The number of services that are running on the cluster in an ACTIVE state Pending Tasks Number of tasks in the cluster that are in PENDING state Running Tasks Number of tasks in the cluster that are in RUNNING state Registered Instances Number of container instances registered into the cluster Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data provides information about the service's state and configuration. For more about inventory data, see Understand and use data. aws/ecs/cluster Name Description status The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE indicates that you can register container instances with the cluster and the associated instances can accept tasks. name User-generated string to identify the cluster. awsRegion AWS region where the cluster is running. aws/ecs/service Name Description status The status of the service. The valid values are ACTIVE, DRAINING or INACTIVE. ACTIVE means the instance accepts new tasks, DRAINING means the instance prevents new tasks from being started and notifies the service scheduler to move tasks to other instances in the cluster, generally used with the purpose of maintaining the instance or scale it down and INACTIVE means the instance is not active. clusterName User-generated string to identify the cluster. serviceName User-generated string to identify the service. launchType Type of infrastructure on which tasks and services are hosted. The valid values are EC2 and FARGATE. awsRegion AWS region where the service is running. deploymentMaximumPercent Upper limit on the number of service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount. deploymentMinimumPercent Lower limit on the number of service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount. desiredCount The number of instantiations of the specified task definition to place and keep running on the cluster. taskDefinition ARN of the task definition file that describes the containers that form the application.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.44888, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Amazon ECS/ECR monitoring integration", - "sections": "aws/ecs/service", - "tags": "AWS integrations list", - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. 
New Relic infrastructure integrations include an integration for reporting your Amazon" - }, - "id": "617d6c3064441facbdfbcea9" - }, { "sections": [ "AWS Health", @@ -10189,7 +10128,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.38576, + "_score": 155.13478, "_version": null, "_explanation": null, "sort": null, @@ -10229,7 +10168,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.38576, + "_score": 155.13478, "_version": null, "_explanation": null, "sort": null, @@ -10269,7 +10208,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.3857, + "_score": 155.13472, "_version": null, "_explanation": null, "sort": null, @@ -10309,7 +10248,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.3857, + "_score": 155.13472, "_version": null, "_explanation": null, "sort": null, @@ -10321,6 +10260,46 @@ "body": " by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation" }, "id": "61566912196a678b22b70db3" + }, + { + "sections": [ + "AWS IAM", + "What's included", + "AWS IAM installation docs", + "What is AWS IAM?", + "Get started!", + "More info" + ], + "title": "AWS IAM", + "type": "quickstarts", + "tags": [ + "aws", + "amazon web services" + ], + "quick_start_name": "AWS IAM", + "external_id": "f9537b3fcd1f265288d5332112ff04d395d134fe", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/aws-iam/f9e383f7-5851-4296-9cdd-1ae94f21abd3/", + "published_at": "2022-02-10T01:38:49Z", + "updated_at": "2021-10-06T01:50:38Z", + "document_type": "page", + "popularity": 1, + "body": "What's included AWS IAM installation docs Monitor AWS IAM by connecting AWS to New Relic. Doc What is AWS IAM? Securely control access to AWS services and resources, create and manage users, groups, and permissions. Get started! Start monitoring AWS IAM by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS IAM documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS IAM. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS IAM observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS IAM installation docs Monitor AWS IAM by connecting AWS to New Relic. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 155.09445, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "AWS IAM", + "sections": "AWS IAM", + "tags": "amazon web services", + "quick_start_name": "AWS IAM", + "body": "What's included AWS IAM installation docs Monitor AWS IAM by connecting AWS to New Relic. 
Doc What is AWS IAM? Securely control access to AWS services and resources, create and manage users, groups, and permissions. Get started! Start monitoring AWS IAM by connecting Amazon Web Services (AWS" + }, + "id": "61566912e7b9d259f48de37d" } ], "/azure-logic-apps/f0ba712c-8ffb-4d0b-a806-8cf3c444fb13": [ @@ -10355,7 +10334,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61313, "_version": null, "_explanation": null, "sort": null, @@ -10404,7 +10383,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.2801, "_version": null, "_explanation": null, "sort": null, @@ -10452,7 +10431,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27109, "_version": null, "_explanation": null, "sort": null, @@ -10494,7 +10473,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.26717, "_version": null, "_explanation": null, "sort": null, @@ -10537,7 +10516,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24292, "_version": null, "_explanation": null, "sort": null, @@ -10578,7 +10557,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -10618,7 +10597,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -10658,7 +10637,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -10698,7 +10677,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -10738,7 +10717,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -10781,7 +10760,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -10822,7 +10801,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -10863,7 +10842,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -10904,7 +10883,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -10944,7 +10923,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -10992,7 +10971,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 163.32745, + "_score": 152.22571, "_version": null, "_explanation": null, "sort": null, @@ -11035,7 +11014,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.40988, + "_score": 148.58951, "_version": null, "_explanation": null, "sort": null, @@ -11078,7 +11057,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.56952, + "_score": 147.81004, "_version": null, "_explanation": null, "sort": null, @@ -11121,7 +11100,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.55879, + "_score": 147.80167, "_version": null, "_explanation": null, "sort": null, @@ -11170,7 +11149,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 152.63333, + "_score": 143.86478, "_version": null, "_explanation": null, "sort": null, @@ -11211,7 +11190,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -11251,7 +11230,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -11291,7 +11270,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -11331,7 +11310,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -11371,7 +11350,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -11419,7 +11398,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.3273, + "_score": 152.22571, "_version": null, "_explanation": null, "sort": null, @@ -11462,7 +11441,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.40974, + "_score": 148.58951, "_version": null, "_explanation": null, "sort": null, @@ -11505,7 +11484,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.56938, + "_score": 147.81004, "_version": null, "_explanation": null, "sort": null, @@ -11548,7 +11527,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.55865, + "_score": 147.80167, "_version": null, "_explanation": null, "sort": null, @@ -11597,7 +11576,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 152.63321, + "_score": 143.86478, "_version": null, "_explanation": null, "sort": null, @@ -11638,7 +11617,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -11678,7 +11657,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -11718,7 +11697,7 @@ "info": "", "_index": 
"520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -11758,7 +11737,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.2099, "_version": null, "_explanation": null, "sort": null, @@ -11798,7 +11777,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.2099, "_version": null, "_explanation": null, "sort": null, @@ -11841,7 +11820,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -11882,7 +11861,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -11923,7 +11902,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -11964,7 +11943,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -12004,7 +11983,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -12046,7 +12025,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -12086,7 +12065,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -12126,7 +12105,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -12141,83 +12120,83 @@ }, { "sections": [ - "AWS Billing", + "AWS Auto Scaling", "What's included", - "AWS Billing installation docs", - "What is AWS Billing?", + "AWS Auto Scaling installation docs", + "What is AWS Auto Scaling?", "Get started!", "More info" ], - "title": "AWS Billing", + "title": "AWS Auto Scaling", "type": "quickstarts", "tags": [ "aws", "amazon web services" ], - "quick_start_name": "AWS Billing", - "external_id": "748c52cca409971c66b5151e152f477fd124606a", + "quick_start_name": "AWS Auto Scaling", + "external_id": "80047bd0bf951243f585ba8b0982f0aa92c39810", "image": "", - "url": "https://developer.newrelic.com/instant-observability/aws-billing/c7f15a54-e243-4e23-95dd-ff08937041ed/", - "published_at": "2022-02-04T02:03:04Z", - "updated_at": "2021-10-06T13:59:37Z", + "url": "https://developer.newrelic.com/instant-observability/aws-auto-scaling/e6a1654d-2e00-4956-a2da-39f1f5f5f5e9/", + "published_at": "2022-02-10T01:40:38Z", + "updated_at": "2021-10-06T13:59:42Z", "document_type": "page", "popularity": 1, - "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. 
Get started! Start monitoring AWS Billing by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Billing documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Billing. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Billing observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Docs", + "body": "What's included AWS Auto Scaling installation docs Monitor AWS Auto Scaling by connecting AWS to New Relic. Doc What is AWS Auto Scaling? Launch or terminate EC2 instances automatically, adapting capacity based on user-defined policies, schedules, and health checks. Get started! Start monitoring AWS Auto Scaling by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Auto Scaling documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Auto Scaling. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Auto Scaling observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Auto Scaling installation docs Monitor AWS Auto Scaling by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "AWS Billing", - "sections": "AWS Billing", + "title": "AWS Auto Scaling", + "sections": "AWS Auto Scaling", "tags": "amazon web services", - "quick_start_name": "AWS Billing", - "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web" + "quick_start_name": "AWS Auto Scaling", + "body": " AWS Auto Scaling by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Auto Scaling documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. 
More info Check out" }, - "id": "61566b7be7b9d2c9178de374" + "id": "61566b7b28ccbcfa59f2145d" }, { "sections": [ - "AWS Elastic Beanstalk", + "AWS Billing", "What's included", - "AWS Elastic Beanstalk installation docs", - "What is AWS Elastic Beanstalk?", + "AWS Billing installation docs", + "What is AWS Billing?", "Get started!", "More info" ], - "title": "AWS Elastic Beanstalk", + "title": "AWS Billing", "type": "quickstarts", "tags": [ "aws", "amazon web services" ], - "quick_start_name": "AWS Elastic Beanstalk", - "external_id": "5f1d325b5bea77e6f550976224e344225a073f7b", + "quick_start_name": "AWS Billing", + "external_id": "748c52cca409971c66b5151e152f477fd124606a", "image": "", - "url": "https://developer.newrelic.com/instant-observability/aws-elastic-beanstalk/b232f332-e27a-4d34-a966-e7f678d35145/", - "published_at": "2022-02-07T01:45:28Z", + "url": "https://developer.newrelic.com/instant-observability/aws-billing/c7f15a54-e243-4e23-95dd-ff08937041ed/", + "published_at": "2022-02-04T02:03:04Z", "updated_at": "2021-10-06T13:59:37Z", "document_type": "page", "popularity": 1, - "body": "What's included AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Doc What is AWS Elastic Beanstalk? Dynamic service that allows easy deployment and scalability for your applications on AWS. Get started! Start monitoring AWS Elastic Beanstalk by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Elastic Beanstalk. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Elastic Beanstalk observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Elastic Beanstalk installation docs Monitor AWS Elastic Beanstalk by connecting AWS to New Relic. Docs", + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Billing documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Billing. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Billing observability quickstart contains 1 data source . This is how you'll get your data into New Relic. 
AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20692, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "AWS Elastic Beanstalk", - "sections": "AWS Elastic Beanstalk", + "title": "AWS Billing", + "sections": "AWS Billing", "tags": "amazon web services", - "quick_start_name": "AWS Elastic Beanstalk", - "body": " by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Elastic Beanstalk documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation" + "quick_start_name": "AWS Billing", + "body": "What's included AWS Billing installation docs Monitor AWS Billing by connecting AWS to New Relic. Doc What is AWS Billing? Service that can be used to pay your AWS bill, monitor your usage, and analyze and control your costs. Get started! Start monitoring AWS Billing by connecting Amazon Web" }, - "id": "61566912196a678b22b70db3" + "id": "61566b7be7b9d2c9178de374" } ], "/azure-postgresql/570c0703-52dc-4481-8b56-e6c0bcacb47d": [ @@ -12254,7 +12233,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.3273, + "_score": 152.22581, "_version": null, "_explanation": null, "sort": null, @@ -12297,7 +12276,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.40974, + "_score": 148.58963, "_version": null, "_explanation": null, "sort": null, @@ -12340,7 +12319,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.56938, + "_score": 147.81015, "_version": null, "_explanation": null, "sort": null, @@ -12383,7 +12362,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.55865, + "_score": 147.80179, "_version": null, "_explanation": null, "sort": null, @@ -12432,7 +12411,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 152.63321, + "_score": 143.86487, "_version": null, "_explanation": null, "sort": null, @@ -12477,7 +12456,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61313, "_version": null, "_explanation": null, "sort": null, @@ -12526,7 +12505,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.2801, "_version": null, "_explanation": null, "sort": null, @@ -12574,7 +12553,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27109, "_version": null, "_explanation": null, "sort": null, @@ -12616,7 +12595,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.26717, "_version": null, "_explanation": null, "sort": null, @@ -12659,7 +12638,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24292, "_version": null, "_explanation": null, "sort": null, @@ -12702,7 +12681,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.114, + "_score": 184.13333, 
"_version": null, "_explanation": null, "sort": null, @@ -12740,7 +12719,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 191.00519, + "_score": 181.24077, "_version": null, "_explanation": null, "sort": null, @@ -12787,7 +12766,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 184.99176, + "_score": 175.80444, "_version": null, "_explanation": null, "sort": null, @@ -12838,7 +12817,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30243, + "_score": 168.52126, "_version": null, "_explanation": null, "sort": null, @@ -12876,7 +12855,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 166.25183, + "_score": 154.3888, "_version": null, "_explanation": null, "sort": null, @@ -12916,7 +12895,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.14012, + "_score": 144.5479, "_version": null, "_explanation": null, "sort": null, @@ -12955,7 +12934,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -12995,7 +12974,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -13035,7 +13014,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -13075,7 +13054,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.2099, "_version": null, "_explanation": null, "sort": null, @@ -13119,7 +13098,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 120.249985, + "_score": 113.49166, "_version": null, "_explanation": null, "sort": null, @@ -13168,7 +13147,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.977295, + "_score": 80.729095, "_version": null, "_explanation": null, "sort": null, @@ -13209,7 +13188,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61224, "_version": null, "_explanation": null, "sort": null, @@ -13258,7 +13237,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.27921, "_version": null, "_explanation": null, "sort": null, @@ -13306,7 +13285,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.2702, "_version": null, "_explanation": null, "sort": null, @@ -13348,7 +13327,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -13389,7 +13368,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -13430,7 +13409,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, 
+ "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -13471,7 +13450,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -13511,7 +13490,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -13553,7 +13532,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -13593,7 +13572,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -13633,7 +13612,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -13673,7 +13652,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -13713,7 +13692,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -13759,7 +13738,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61319, "_version": null, "_explanation": null, "sort": null, @@ -13808,7 +13787,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.28016, "_version": null, "_explanation": null, "sort": null, @@ -13856,7 +13835,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27115, "_version": null, "_explanation": null, "sort": null, @@ -13898,7 +13877,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.267235, "_version": null, "_explanation": null, "sort": null, @@ -13941,7 +13920,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24298, "_version": null, "_explanation": null, "sort": null, @@ -13996,7 +13975,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 431.29297, + "_score": 400.29047, "_version": null, "_explanation": null, "sort": null, @@ -14008,57 +13987,6 @@ }, "id": "617d5841196a67bb40f7c1de" }, - { - "sections": [ - "Kubernetes integration: what's changed in v3", - "v3 BETA", - "Integration version and Chart version", - "Overview", - "Architectural changes", - "Kube-state-metrics component", - "Kubelet component", - "Control plane component", - "Helm Charts", - "Migration Guide", - "KSM configuration", - "Tip", - "Control plane configuration", - "Agent configuration", - "Integrations configuration", - "Miscellaneous chart values", - "Upgrade from v2", - "Important" - ], - "title": "Kubernetes integration: what's changed in v3", - "type": "docs", - "tags": [ - "Changelog", - "Kubernetes integration", - "Integrations" - ], - "external_id": 
"a78ca20934f98fd2b43c7a9fbc2453c802c24ab8", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/changes-since-v3/", - "published_at": "2022-02-15T19:13:49Z", - "updated_at": "2022-02-13T15:01:38Z", - "document_type": "page", - "popularity": 1, - "body": "v3 BETA Kubernetes integration v3 is currently in a late stage beta, and we expect to make it generally available during the second quarter of 2022. We encourage you to give it a try and let us know what you think! Integration version and Chart version The Kubernetes integration v3 (appVersion) is included on the nri-bundle chart version 4. Overview v3 BETA Data reported by the Kubernetes Integration version 3 has not changed with respect to version 2. For this major release, we focused in configurability, stability, and user experience. From version 3 onwards, New Relic's Kubernetes solution features a new architecture which aims to be more modular and configurable, giving you more power to choose how the solution is deployed and making it compatible with more environments. Architectural changes In this new version, the main component of the integration, the newrelic-infrastructure DaemonSet, is divided in three different components: nrk8s-ksm, nrk8s-kubelet, and nrk8s-controlplane, with the first being a deployment and the next two being DaemonSets. This makes it easier to make decisions at scheduling and deployment time, rather than runtime. Moreover, we also changed the lifecycle of the scraping process. We went from a one-shot, short-lived process, to a long-lived one, allowing it to leverage higher-level Kubernetes APIs like the Kubernetes informers, that provide built-in caching and watching of cluster objects. For this reason, each of the components has two containers: A container for the integration, responsible for collecting metrics. A container with the New Relic Infrastructure Agent, which is used to send the metrics to the New Relic Platform. Kube-state-metrics component We build our cluster state metrics on top of the OSS project kube-state-metrics, which is housed under the Kubernetes organization itself. Previously, as our solution was comprised by just one DaemonSet, an election process was made to decide which pod was going to be in charge of scraping the metrics. This process was based merely on locality. The pod in charge would be the one that shares a node with the KSM deployment. As the KSM output contains data for the whole cluster, parsing this output requires a substantial amount of resources. While this is something that big cluster operators can assume, the fact that it's one arbitrary instance of the DaemonSet the one that will need this big amount of resources forces cluster operators to allow such consumption to the whole DaemonSet, where only one actually needed them. Another problem with KSM scraping was figuring out in which node the KSM pod lived. To do this, we need to contact the API Server and filter pods by some labels, but given the short-lived nature of the integration, caches and watchers were not being used effectively by it. This caused that, on large clusters, all instances of the DaemonSet flooded the control plane with non-namespaced pod list requests as an attempt to figure out whether the KSM pod was living next to them. We decided to tackle this problem by making two big changes to how KSM is scraped: Split the responsibility of scraping KSM out of the DaemonSet pods to a different, single instance Deployment. 
Refactor the code and make it long-running, so we can leverage Kubernetes informers which provide built-in caching and watching mechanisms. Thus, a specific Deployment nrk8s-ksm now takes care of finding KSM and scraping it. With this pod now being long-lived and single, it can safely use an endpoints informer to locate the IP of the KSM pod and scrape it. The informer will automatically cache the list of endpoints in the cluster locally and watch for new ones, avoiding storming the API Server with requests to figure out where the pod was located. While a sharded KSM setup is not supported yet, this new code was built with this future improvement in mind. Kubelet component The Kubelet is the “Kubernetes agent”, a service that runs on every Kubernetes node and is responsible for creating the containers as instructed by the control plane. Since it's the Kubelet that partners closely with the Container Runtime, it's the main source of infrastructure metrics for our integration, such as use of CPU, memory, disk, network, etc. Although not thoroughly documented, the Kubelet API is the de-facto standard source for most Kubernetes metrics. Scraping the Kubelet is typically a low-resource operation. Given this, and our intent to minimize internode traffic whenever possible, nrk8s-kubelet is run as a DaemonSet where each instance gathers metrics from the Kubelet running on the same node. nrk8s-kubelet no longer requires hostNetwork to run properly, and instead it connects to the Kubelet using the Node IP. If this process fails, nrk8s-kubelet will fall back to reaching the node through the API Server proxy. This fallback mechanism is not new, but we do encourage you to mind this if you have very large clusters, as proxying many kubelets might increase the load on the API server. You can check if the API Server is being used as a proxy by looking for a message like this in the logs: Trying to connect to kubelet through API proxy Copy Control plane component Enabling the integration to successfully find and connect to CP components was probably one of the hardest parts of this effort. The main reason for this is the number of ways in which CP components can be configured: inside or outside the cluster, with one or many replicas, with or without dedicated nodes, etc. Moreover, different CP components might be configured differently. We built the current approach with the following scenarios in mind: CP monitoring should work out of the box for those environments in which the CP is reachable out of the box, e.g. Kubeadm or even Minikube. For setups where the CP cannot be autodiscovered, for example if it lives outside the cluster, we should provide a way for the user to specify their own endpoints. Failure to autodiscover shouldn't cause the deployment to fail, but failure to hit a manually defined endpoint should. As major Kubernetes distributions such as Kubeadm deploy CP components configured to listen only on localhost on the host's network namespace, we chose to deploy nrk8s-controlplane as a DaemonSet with hostNetwork: true. We structured the configuration to support autodiscovery and static endpoints. To be compatible with a wide range of distributions out of the box, we provide a wide range of known defaults as configuration entries. Doing this in the configuration instead of the code allows you to tweak autodiscovery to your needs. Another improvement was adding the possibility of having multiple endpoints per selector and adding a probe mechanism which automatically detects the correct one.
This allows you to try different configurations such as ports or protocols by using the same selector. Scraping configuration for the etcd CP component looks like the following, where the same structure and features apply for all components: config: etcd: enabled: true autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy If staticEndpoint is set, the component will try to scrape it. If it can't hit the endpoint, the integration will fail, so there are no silent errors when manual endpoints are configured. If staticEndpoint is not set, the component will iterate over the autodiscover entries looking for the first pod that matches the selector in the specified namespace, and optionally is running on the same node as the DaemonSet (if matchNode is set to true). After a pod is discovered, the component probes the listed endpoints in order by issuing an HTTP HEAD request, and scrapes the first successfully probed one using the selected authorization type. While above we show a config excerpt for the etcd component, the scraping logic is the same for other components. For more detailed instructions on how to configure control plane monitoring, please check the control plane monitoring page. Helm Charts Helm is the primary means we offer to deploy our solution into your clusters. Chart complexity was also significantly increased from the previous version, where it only had to manage one DaemonSet. Now, it has to manage one deployment and two DaemonSets where each has slightly different configurations. This will give you more flexibility to adapt the solution to your needs, without the need to apply manual patches on top of the chart and the generated manifests. Some of the new features that our new Helm chart exposes are: Full control of the securityContext for all pods Full control of pod labels and annotations for all pods Ability to add extra environment variables, volumes, and volumeMounts Full control over the integration configuration, including which endpoints are reached, autodiscovery behavior, and scraping intervals Better alignment with Helm idioms and standards You can check full details on all the switches that can be flipped in the Chart's README.md. Migration Guide In order to make migration from earlier versions as easy as possible, we developed a compatibility layer that will translate most of the options that were possible to specify in the old newrelic-infrastructure chart to their new counterparts. This compatibility layer is temporary and will be removed in the future, so we encourage you to read this guide carefully and migrate the configuration with human supervision. KSM configuration Tip KSM monitoring works out of the box for most configurations; most users will not need to change this config. disableKubeStateMetrics has been replaced by ksm.enabled. The default is still the same (KSM scraping enabled). kubeStateMetricsScheme, kubeStateMetricsPort, kubeStateMetricsUrl, kubeStateMetricsPodLabel, and kubeStateMetricsNamespace have been replaced by the more comprehensive and flexible ksm.config. The ksm.config object has the following structure: ksm: config: # When autodiscovering KSM, force the following scheme. By default, `http` is used. scheme: \"http\" # Label selector to find kube-state-metrics endpoints.
Defaults to `app.kubernetes.io/name=kube-state-metrics`. selector: \"app.kubernetes.io/name=kube-state-metrics\" # Restrict KSM discovery to this particular namespace. Defaults to all namespaces. namespace: \"\" # When autodiscovering, only consider endpoints that use this port. By default, all ports from the discovered `endpoint` are probed. #port: 8080 # Override autodiscovery mechanism completely and specify the KSM url directly instead #staticUrl: \"http://test.io:8080/metrics\" Copy Control plane configuration Control plane configuration has changed substantially. If you previously had control plane monitoring enabled, we encourage you to take a look at the Configure control plane monitoring dedicated page. The following options have been replaced by more comprehensive configuration, covered in the section linked above: apiServerSecurePort etcdTlsSecretName etcdTlsSecretNamespace controllerManagerEndpointUrl, etcdEndpointUrl, apiServerEndpointUrl, and schedulerEndpointUrl Agent configuration The agent config file, previously specified in config, has been moved to common.agentConfig. The format of the file has not changed, and the full range of options that can be configured can be found here. The following agent options were previously \"aliased\" in the root of the values.yml file, and are no longer available: logFile has been replaced by common.agentConfig.log_file. eventQueueDepth has been replaced by common.agentConfig.event_queue_depth. customAttributes has changed in format to a yaml object. The previous format, a manually json-encoded string e.g. {\"team\": \"devops\"}, is still accepted although discouraged. Previously, customAttributes had a default clusterName entry which might have unwanted consequences if removed. This is no longer the case; users may now safely override customAttributes in its entirety. discoveryCacheTTL has been completely removed, as the discovery is now performed using kubernetes informers which have a built-in cache. Integrations configuration Integrations were previously configured under integrations_config, using an array format: integrations_config: - name: nri-redis.yaml data: discovery: # ... integrations: # ... Copy The mechanism remains the same, but we have changed the format to be more user-friendly: integrations: nri-redis-sampleapp: discovery: # ... integrations: # ... Copy Moreover, the --port and --tls flags are now mandatory on the discovery command. In the past, the following would work: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes Copy From v3 onwards, you must specify --port and --tls: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes --tls --port 10250 Copy This change is required because in v2 and below, the nrk8s-kubelet component (or its equivalent) ran with hostNetwork: true, so nri-discovery-kubernetes could connect to the kubelet using localhost and plain http. For security reasons, this is no longer the case, hence the need to specify both flags from now on. For more details on how to configure on-host integrations in Kubernetes, please check the Monitor services in Kubernetes page. Miscellaneous chart values While not related to the integration configuration, the following miscellaneous options for the helm chart have also changed: runAsUser has been replaced by securityContext, which is templated directly into the pods and is more configurable. resources has been removed, as we now deploy three different workloads.
Resources for each one can be configured individually under: ksm.resources kubelet.resources controlPlane.resources Similarly, tolerations has been split into three and the previous one is no longer valid: ksm.tolerations kubelet.tolerations controlPlane.tolerations All three default to tolerate any value for NoSchedule and NoExecute image and all its subkeys have been replaced by individual sections for each of the three images that are now deployed: images.forwarder.* to configure the infrastructure-agent forwarder. images.agent.* to configure the image bundling the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values-newrelic.yaml file with your desired License Key and configuration. If you had previously installed our chart from the CLI directly, for example using a command like the following: bash Copy $ helm install newrelic/nri-bundle \\ > --set global.licenseKey= \\ > --set global.cluster= \\ > --set infrastructure.enabled=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set ksm.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true You can take the provided --set arguments and put them in a yaml file like the following: # values-newrelic.yaml global: licenseKey: cluster: infrastructure: enabled: true prometheus: enabled: true webhook: enabled: true ksm: enabled: true kubeEvents: enabled: true logging: enabled: true Copy After doing this, and adapting any other setting you might have changed according to the section above, you can upgrade by running the following command: bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel The --devel flag will instruct helm to download the v3 version of the integration (version 4.x of the nri-bundle chart). Important The --reuse-values flag is not supported for upgrading from v2 to v3.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 333.39224, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Kubernetes integration: what's changed in v3", - "sections": "Kubernetes integration: what's changed in v3", - "tags": "Kubernetes integration", - "body": " the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. 
Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values" - }, - "id": "61fd3c9d28ccbc72eec0dcda" - }, { "sections": [ "Configure control plane monitoring", @@ -14103,7 +14031,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 325.78156, + "_score": 319.34735, "_version": null, "_explanation": null, "sort": null, @@ -14116,51 +14044,95 @@ }, { "sections": [ - "Link your applications to Kubernetes", + "Kubernetes integration: what's changed in v3", + "v3 BETA", + "Integration version and Chart version", + "Overview", + "Architectural changes", + "Kube-state-metrics component", + "Kubelet component", + "Control plane component", + "Helm Charts", + "Migration Guide", + "KSM configuration", "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" + "Control plane configuration", + "Agent configuration", + "Integrations configuration", + "Miscellaneous chart values", + "Upgrade from v2", + "Important" ], - "title": "Link your applications to Kubernetes", + "title": "Kubernetes integration: what's changed in v3", "type": "docs", "tags": [ - "Link apps and services", + "Changelog", "Kubernetes integration", "Integrations" ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", + "external_id": "a78ca20934f98fd2b43c7a9fbc2453c802c24ab8", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/changes-since-v3/", + "published_at": "2022-02-15T19:13:49Z", + "updated_at": "2022-02-13T15:01:38Z", "document_type": "page", "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. 
Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes, you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services on the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set, and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled.
Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates, you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation of certificates, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl x509 -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help.
Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to re-run the job using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used in order to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step.
If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/POD_NAME Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs, like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries in the logs, it means that the apiserver is unable to communicate with the webhook service; this could be due to networking rules or security groups rejecting the communication. To check whether the apiserver is unable to communicate with the webhook, inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster; this makes the apiserver try to communicate with the webhook. The following command creates a busybox container. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors.
grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", + "body": "v3 BETA Kubernetes integration v3 is currently in a late stage beta, and we expect to make it generally available during the second quarter of 2022. We encourage you to give it a try and let us know what you think! Integration version and Chart version The Kubernetes integration v3 (appVersion) is included in the nri-bundle chart version 4. Overview v3 BETA Data reported by the Kubernetes Integration version 3 has not changed with respect to version 2. For this major release, we focused on configurability, stability, and user experience. From version 3 onwards, New Relic's Kubernetes solution features a new architecture which aims to be more modular and configurable, giving you more power to choose how the solution is deployed and making it compatible with more environments. Architectural changes In this new version, the main component of the integration, the newrelic-infrastructure DaemonSet, is divided into three different components: nrk8s-ksm, nrk8s-kubelet, and nrk8s-controlplane, with the first being a deployment and the next two being DaemonSets. This makes it easier to make decisions at scheduling and deployment time, rather than runtime. Moreover, we also changed the lifecycle of the scraping process. We went from a one-shot, short-lived process, to a long-lived one, allowing it to leverage higher-level Kubernetes APIs like the Kubernetes informers, which provide built-in caching and watching of cluster objects. For this reason, each of the components has two containers: A container for the integration, responsible for collecting metrics. A container with the New Relic Infrastructure Agent, which is used to send the metrics to the New Relic Platform. Kube-state-metrics component We build our cluster state metrics on top of the OSS project kube-state-metrics, which is housed under the Kubernetes organization itself. Previously, as our solution comprised just one DaemonSet, an election process decided which pod would be in charge of scraping the metrics. This process was based merely on locality: the pod in charge was the one sharing a node with the KSM deployment.
As the KSM output contains data for the whole cluster, parsing this output requires a substantial amount of resources. While this is a cost that operators of big clusters can assume, the fact that a single, arbitrary instance of the DaemonSet needs this large amount of resources forces cluster operators to allow such consumption for the whole DaemonSet, even though only one instance actually needs it. Another problem with KSM scraping was figuring out in which node the KSM pod lived. To do this, we needed to contact the API Server and filter pods by labels, but given the short-lived nature of the integration, caches and watchers were not being used effectively. As a result, on large clusters, all instances of the DaemonSet flooded the control plane with non-namespaced pod list requests in an attempt to figure out whether the KSM pod was living next to them. We decided to tackle this problem by making two big changes to how KSM is scraped: Split the responsibility of scraping KSM out of the DaemonSet pods to a different, single instance Deployment. Refactor the code and make it long-running, so we can leverage Kubernetes informers, which provide built-in caching and watching mechanisms. Thus, a specific Deployment nrk8s-ksm now takes care of finding KSM and scraping it. With this pod now being long-lived and single, it can safely use an endpoints informer to locate the IP of the KSM pod and scrape it. The informer will automatically cache the list of endpoints in the cluster locally and watch for new ones, avoiding storming the API Server with requests to figure out where the pod is located. While a sharded KSM setup is not supported yet, this new code was built with this future improvement in mind. Kubelet component The Kubelet is the “Kubernetes agent”, a service that runs on every Kubernetes node and is responsible for creating the containers as instructed by the control plane. Since it's the Kubelet who partners closely with the Container Runtime, it's the main source of infrastructure metrics for our integration, such as CPU, memory, disk, and network usage. Although not thoroughly documented, the Kubelet API is the de-facto standard source for most Kubernetes metrics. Scraping the Kubelet is typically a low-resource operation. Given this, and our intent to minimize internode traffic whenever possible, nrk8s-kubelet is run as a DaemonSet where each instance gathers metrics from the Kubelet running on the same node. nrk8s-kubelet no longer requires hostNetwork to run properly, and instead it connects to the Kubelet using the Node IP. If this process fails, nrk8s-kubelet will fall back to reaching the node through the API Server proxy. This fallback mechanism is not new, but we do encourage you to mind this if you have very large clusters, as proxying many kubelets might increase the load on the API server. You can check if the API Server is being used as a proxy by looking for a message like this in the logs: Trying to connect to kubelet through API proxy Copy Control plane component Enabling the integration to successfully find and connect to CP components was probably one of the hardest parts of this effort. The main reason for this is the number of ways in which CP components can be configured: inside or outside the cluster, with one or many replicas, with or without dedicated nodes, etc. Moreover, different CP components might be configured differently.
We built the current approach with the following scenarios in mind: CP monitoring should work out of the box for those environments in which the CP is directly reachable, e.g. Kubeadm or even Minikube. For setups where the CP cannot be autodiscovered, for example if it lives outside the cluster, we should provide a way for users to specify their own endpoints. Failure to autodiscover shouldn't cause the deployment to fail, but failure to hit a manually defined endpoint should. As major Kubernetes distributions such as Kubeadm deploy CP components configured to listen only on localhost on the host's network namespace, we chose to deploy nrk8s-controlplane as a DaemonSet with hostNetwork: true. We structured the configuration to support autodiscovery and static endpoints. To be compatible with a wide range of distributions out of the box, we provide a wide range of known defaults as configuration entries. Doing this in the configuration instead of the code allows you to tweak autodiscovery to your needs. Another improvement was adding the possibility of having multiple endpoints per selector and adding a probe mechanism which automatically detects the correct one. This allows you to try different configurations such as ports or protocols by using the same selector. Scraping configuration for the etcd CP component looks like the following; the same structure and features apply to all components: config: etcd: enabled: true autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy If staticEndpoint is set, the component will try to scrape it. If it can't hit the endpoint, the integration will fail, so there are no silent errors when manual endpoints are configured. If staticEndpoint is not set, the component will iterate over the autodiscover entries looking for the first pod that matches the selector in the specified namespace, and, optionally, is running on the same node as the DaemonSet pod (if matchNode is set to true). After a pod is discovered, the component probes the listed endpoints in order by issuing an HTTP HEAD request, and scrapes the first one that responds successfully, using the selected authorization type. While above we show a config excerpt for the etcd component, the scraping logic is the same for other components. For more detailed instructions on how to configure control plane monitoring, please check the control plane monitoring page. Helm Charts Helm is the primary means we offer to deploy our solution into your clusters. Chart complexity was also significantly increased from the previous version, where it only had to manage one DaemonSet. Now, it has to manage one deployment and two DaemonSets, each with slightly different configurations. This will give you more flexibility to adapt the solution to your needs, without the need to apply manual patches on top of the chart and the generated manifests.
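As an illustration of that configurability, here is a hedged sketch of what pointing the integration at an externally hosted etcd could look like in a values file. It follows the staticEndpoint structure shown above, but the controlPlane.config nesting and the example URL are assumptions, so check the chart's README for the exact key path: controlPlane: config: etcd: enabled: true staticEndpoint: url: https://203.0.113.10:2379 insecureSkipVerify: true auth: {} Copy Because staticEndpoint is set, autodiscovery is skipped, and a failure to reach this endpoint surfaces as an integration error rather than a silent gap in the data.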
Some of the new features that our new Helm chart exposes are: Full control of the securityContext for all pods Full control of pod labels and annotations for all pods Ability to add extra environment variables, volumes, and volumeMounts Full control of the integration configuration, including which endpoints are reached, autodiscovery behavior, and scraping intervals Better alignment with Helm idioms and standards You can check full details on all the switches that can be flipped in the Chart's README.md. Migration Guide In order to make migration from earlier versions as easy as possible, we developed a compatibility layer that translates most of the options that could be specified in the old newrelic-infrastructure chart to their new counterparts. This compatibility layer is temporary and will be removed in the future, so we encourage you to read this guide carefully and migrate the configuration with human supervision. KSM configuration Tip KSM monitoring works out of the box for most configurations; most users will not need to change this config. disableKubeStateMetrics has been replaced by ksm.enabled. The default is still the same (KSM scraping enabled). kubeStateMetricsScheme, kubeStateMetricsPort, kubeStateMetricsUrl, kubeStateMetricsPodLabel, and kubeStateMetricsNamespace have been replaced by the more comprehensive and flexible ksm.config. The ksm.config object has the following structure: ksm: config: # When autodiscovering KSM, force the following scheme. By default, `http` is used. scheme: \"http\" # Label selector to find kube-state-metrics endpoints. Defaults to `app.kubernetes.io/name=kube-state-metrics`. selector: \"app.kubernetes.io/name=kube-state-metrics\" # Restrict KSM discovery to this particular namespace. Defaults to all namespaces. namespace: \"\" # When autodiscovering, only consider endpoints that use this port. By default, all ports from the discovered `endpoint` are probed. #port: 8080 # Override autodiscovery mechanism completely and specify the KSM url directly instead #staticUrl: \"http://test.io:8080/metrics\" Copy Control plane configuration Control plane configuration has changed substantially. If you previously had control plane monitoring enabled, we encourage you to take a look at the Configure control plane monitoring dedicated page. The following options have been replaced by more comprehensive configuration, covered in the section linked above: apiServerSecurePort etcdTlsSecretName etcdTlsSecretNamespace controllerManagerEndpointUrl, etcdEndpointUrl, apiServerEndpointUrl, and schedulerEndpointUrl Agent configuration The agent config file, previously specified in config, has been moved to common.agentConfig. The format of the file has not changed, and the full range of options that can be configured can be found here. The following agent options were previously \"aliased\" in the root of the values.yml file, and are no longer available: logFile has been replaced by common.agentConfig.log_file. eventQueueDepth has been replaced by common.agentConfig.event_queue_depth. customAttributes has changed in format to a yaml object. The previous format, a manually JSON-encoded string, e.g. {\"team\": \"devops\"}, is still accepted, although discouraged. Previously, customAttributes had a default clusterName entry which might have unwanted consequences if removed. This is no longer the case; users may now safely override customAttributes in its entirety.
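For instance, a minimal sketch of the new customAttributes object format in a values file; the attribute names and values here are purely illustrative: customAttributes: team: devops environment: staging Copy Since clusterName is no longer injected as a default entry, overriding this object in full no longer has hidden side effects.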
discoveryCacheTTL has been completely removed, as the discovery is now performed using Kubernetes informers, which have a built-in cache. Integrations configuration Integrations were previously configured under integrations_config, using an array format: integrations_config: - name: nri-redis.yaml data: discovery: # ... integrations: # ... Copy The mechanism remains the same, but we have changed the format to be more user-friendly: integrations: nri-redis-sampleapp: discovery: # ... integrations: # ... Copy Moreover, now the --port and --tls flags are mandatory for the discovery command. In the past, the following would work: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes Copy From v3 onwards, you must specify --port and --tls: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes --tls --port 10250 Copy This change is required because in v2 and below, the nrk8s-kubelet component (or its equivalent) ran with hostNetwork: true, so nri-discovery-kubernetes could connect to the kubelet using localhost and plain HTTP. For security reasons, this is no longer the case, hence the need to specify both flags from now on. For more details on how to configure on-host integrations in Kubernetes, please check the Monitor services in Kubernetes page. Miscellaneous chart values While not related to the integration configuration, the following miscellaneous options for the Helm chart have also changed: runAsUser has been replaced by securityContext, which is templated directly into the pods and more configurable. resources has been removed, as now we deploy three different workloads. Resources for each one can be configured individually under: ksm.resources kubelet.resources controlPlane.resources Similarly, tolerations has been split into three and the previous one is no longer valid: ksm.tolerations kubelet.tolerations controlPlane.tolerations All three default to tolerate any value for NoSchedule and NoExecute. image and all its subkeys have been replaced by individual sections for each of the three images that are now deployed: images.forwarder.* to configure the infrastructure-agent forwarder. images.agent.* to configure the image bundling the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values-newrelic.yaml file with your desired License Key and configuration.
If you had previously installed our chart from the CLI directly, for example using a command like the following: bash Copy $ helm install newrelic/nri-bundle \\ > --set global.licenseKey= \\ > --set global.cluster= \\ > --set infrastructure.enabled=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set ksm.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true You can take the provided --set arguments and put them in a yaml file like the following: # values-newrelic.yaml global: licenseKey: cluster: infrastructure: enabled: true prometheus: enabled: true webhook: enabled: true ksm: enabled: true kubeEvents: enabled: true logging: enabled: true Copy After doing this, and adapting any other setting you might have changed according to the section above, you can upgrade by running the following command: bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel The --devel flag will instruct helm to download the v3 version of the integration (version 4.x of the nri-bundle chart). Important The --reuse-values flag is not supported for upgrading from v2 to v3.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 312.65958, + "_score": 316.72003, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Link your applications to Kubernetes", - "sections": "Link your applications to Kubernetes", + "title": "Kubernetes integration: what's changed in v3", + "sections": "Kubernetes integration: what's changed in v3", "tags": "Kubernetes integration", - "body": " \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled= true" + "body": " the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. 
Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values" }, - "id": "617daead28ccbc662b7ffe23" + "id": "61fd3c9d28ccbc72eec0dcda" + }, + { + "sections": [ + "Not seeing control plane data", + "Problem", + "Solution", + "Check that the master nodes have the correct labels", + "Check that the integration is running on the master nodes", + "Check that the control plane components have the required labels", + "Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs", + "Manually query the metrics of the components" + ], + "title": "Not seeing control plane data", + "type": "docs", + "tags": [ + "Troubleshooting", + "Kubernetes integration", + "Integrations" + ], + "external_id": "02edaca82526773fcaef4adb97825d2349a404b4", + "image": "", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/troubleshooting/not-seeing-control-plane-data/", + "published_at": "2022-02-15T20:58:20Z", + "updated_at": "2021-10-24T03:07:45Z", + "document_type": "troubleshooting_doc", + "popularity": 1, + "body": "Problem You have completed the installation procedure for New Relic's Kubernetes integration and you are seeing Kubernetes data in your New Relic account, but there is no data from any of the control plane components. Solution Check that the master nodes have the correct labels Execute the following commands to manually find the master nodes: kubectl get nodes -l node-role.kubernetes.io/master=\"\" Copy kubectl get nodes -l kubernetes.io/role=\"master\" Copy If the master nodes follow the labeling convention defined in the discovery of master nodes and control plane components documentation section, you should get some output like: NAME STATUS ROLES AGE VERSION ip-10-42-24-4.ec2.internal Ready master 42d v1.14.8 Copy If no nodes are found, there are two scenarios: Your master nodes don’t have the required labels that identify them as masters; in this case, you need to add both labels to your master nodes. You’re in a managed cluster and your provider is handling the master nodes for you. In this case there is nothing you can do, since your provider is limiting access to those nodes. Check that the integration is running on the master nodes Replace the placeholder in the following command with one of the node names returned in the previous step to get an integration pod running on a master node: kubectl get pods --field-selector spec.nodeName=NODE_NAME -l name=newrelic-infra --all-namespaces Copy The next command is the same, except that it selects the node for you: kubectl get pods --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra --all-namespaces Copy If everything is correct, you should get some output like: NAME READY STATUS RESTARTS AGE newrelic-infra-whvzt 1/1 Running 0 6d20h Copy If the integration is not running on your master nodes, check that the daemonset has all the desired instances running and ready. kubectl get daemonsets -l app=newrelic-infra --all-namespaces Copy Check that the control plane components have the required labels Refer to the discovery of master nodes and control plane components documentation section and look for the labels the integration uses to discover the components.
Then run the following commands to see if there are any pods with such labels and the nodes where they are running: kubectl get pods -l k8s-app=kube-apiserver --all-namespaces Copy If there is a component with the given label, you should see something like: NAMESPACE NAME READY STATUS RESTARTS AGE kube-system kube-apiserver-ip-10-42-24-42.ec2.internal 1/1 Running 3 49d Copy The same should be done with the rest of the components: kubectl get pods -l k8s-app=etcd-manager-main --all-namespaces Copy kubectl get pods -l k8s-app=kube-scheduler --all-namespaces Copy kubectl get pods -l k8s-app=kube-kube-controller-manager --all-namespaces Copy Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs To retrieve the logs, follow the instructions on get logs from pod running on a master node. The integration logs the following message for every component: “Running job: COMPONENT_NAME”. For example: Running job: scheduler Copy Running job: etcd Copy Running job: controller-manager Copy Running job: api-server Copy If you didn’t specify the ETCD_TLS_SECRET_NAME configuration option, you’ll find the following message in the logs: Skipping job creation for component etcd: etcd requires TLS configuration, none given Copy If any error occurs while querying the metrics of any component, it will be logged after the Running job message. Manually query the metrics of the components Refer to the discovery of master nodes and control plane components documentation section to get the endpoint of the control plane component you want to query. With the endpoint, we can use the integration pod that’s running on the same node as the component to query it. The following are examples of how to query the Kubernetes scheduler: kubectl exec -ti POD_NAME -- wget -O - localhost:10251/metrics Copy The following command does the same, but also chooses the pod for you: kubectl exec -ti $(kubectl get pods --all-namespaces --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra -o jsonpath=\"{.items[0].metadata.name}\") -- wget -O - localhost:10251/metrics Copy If everything is correct, you should get some metrics in the Prometheus format, something like: Connecting to localhost:10251 (127.0.0.1:10251) # HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend. # TYPE apiserver_audit_event_total counter apiserver_audit_event_total 0 # HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend. # TYPE apiserver_audit_requests_rejected_total counter apiserver_audit_requests_rejected_total 0 # HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram apiserver_client_certificate_expiration_seconds_bucket{le=\"0\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"1800\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"3600\"} 0 Copy", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 275.6186, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Not seeing control plane data", + "sections": "Check that the integration is running on the master nodes", + "tags": "Kubernetes integration", + "body": " of master nodes and control plane components documentation section and look for the labels the integration uses to discover the components. Then run the following commands to see if there are any pods with such labels and the nodes where they are running: kubectl get pods -l k8s-app=kube-apiserver --all" + }, + "id": "617daf22196a67f585f7e101" }, { "sections": [ @@ -14200,7 +14172,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 281.06845, + "_score": 275.31503, "_version": null, "_explanation": null, "sort": null, @@ -14246,7 +14218,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 258.72473, + "_score": 257.36905, "_version": null, "_explanation": null, "sort": null, @@ -14295,7 +14267,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 255.74088, + "_score": 254.41615, "_version": null, "_explanation": null, "sort": null, @@ -14342,7 +14314,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 254.9986, + "_score": 253.6743, "_version": null, "_explanation": null, "sort": null, @@ -14386,7 +14358,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 233.1252, + "_score": 231.72287, "_version": null, "_explanation": null, "sort": null, @@ -14419,7 +14391,7 @@ "body": "You’re an operations engineer relying on a network of SNMP-enabled devices. Your team already uses New Relic’s features for monitoring software and infrastructure, but it’s time to monitor your network devices as well. In this track, you use Kentik’s ktranslate library to automatically discover your network devices, gather data, and send that data to New Relic to correlate network performance with infrastructure, applications, and digital experiences. Then you use New Relic to monitor the health of your network. Objectives Poll SNMP data from network devices and send it to New Relic Sample network flow data and send it to New Relic Create a New Relic dashboard for monitoring your network devices Create a New Relic workload to logically group your devices and set up anomaly detection Use your new data to understand behaviors within your network Requirements A free New Relic account A full platform user or a core user with the Nerdpack modify user privilege Important This track utilizes an ephemeral virtual machine. As a result, you need to finish the entire track in one sitting or you'll lose your progress when time expires. Lab Homework Well done! 
Now that you've gotten your feet wet using ktranslate and New Relic to monitor your network devices, here are some things you can do on your own as you prepare to monitor your real-life network: Read \"Get started with Network Performance Monitoring\" to learn how to get the most of New Relic's network performance monitoring Visit the ktranslate GitHub repository to learn how ktranslate works Read \"What is an entity?\" to learn more about New Relic entities and how to synthesize your own. This will help you get more mileage from your Kentik default entities.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 209.44687, + "_score": 207.9004, "_version": null, "_explanation": null, "sort": null, @@ -14474,7 +14446,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 380.6577, + "_score": 353.5246, "_version": null, "_explanation": null, "sort": null, @@ -14486,54 +14458,6 @@ }, "id": "617d5841196a67bb40f7c1de" }, - { - "sections": [ - "Link your applications to Kubernetes", - "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" - ], - "title": "Link your applications to Kubernetes", - "type": "docs", - "tags": [ - "Link apps and services", - "Kubernetes integration", - "Integrations" - ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", - "document_type": "page", - "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. 
To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes, you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, you install a resource that requires admin permissions on the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services on the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled.
Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates, you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation of certificates, modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl rsa -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out CA_BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help.
Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata To validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running on Kubernetes version 1.19.x. Version 1.3.2 of the image fixes this issue, so it is enough to re-run the job using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the latest versions of the manifest and of the nri-bundle chart already reference the correct version of the image. Problem In OpenShift version 4.x, the CA used to patch the mutatingwebhookconfiguration resource is not the one used to sign the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA location might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step.
If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/POD_NAME Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs, like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries in the logs, it means that the apiserver is unable to communicate with the webhook service; this could be due to networking rules or security groups rejecting the communication. To check whether the apiserver is unable to communicate with the webhook, inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster; this makes the apiserver try to communicate with the webhook. The following command creates a busybox container. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors.
grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 244.76897, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Link your applications to Kubernetes", - "sections": "Link your applications to Kubernetes", - "tags": "Kubernetes integration", - "body": " the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips" - }, - "id": "617daead28ccbc662b7ffe23" - }, { "sections": [ "Kubernetes Data Ingest Analysis", @@ -14560,7 +14484,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 235.1036, + "_score": 235.28261, "_version": null, "_explanation": null, "sort": null, @@ -14573,6 +14497,47 @@ }, "id": "618f1e16e7b9d26af1386ee2" }, + { + "sections": [ + "Not seeing control plane data", + "Problem", + "Solution", + "Check that the master nodes have the correct labels", + "Check that the integration is running on the master nodes", + "Check that the control plane components have the required labels", + "Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs", + "Manually query the metrics of the components" + ], + "title": "Not seeing control plane data", + "type": "docs", + "tags": [ + "Troubleshooting", + "Kubernetes integration", + "Integrations" + ], + "external_id": "02edaca82526773fcaef4adb97825d2349a404b4", + "image": "", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/troubleshooting/not-seeing-control-plane-data/", + "published_at": "2022-02-15T20:58:20Z", + "updated_at": "2021-10-24T03:07:45Z", + "document_type": "troubleshooting_doc", + "popularity": 1, + "body": "Problem You have completed the installation procedure for New Relic's Kubernetes integration, you are seeing Kubernetes data in your New Relic account but there is no data from any of the control plane 
components. Solution Check that the master nodes have the correct labels Execute the following commands to manually find the master nodes: kubectl get nodes -l node-role.kubernetes.io/master=\"\" Copy kubectl get nodes -l kubernetes.io/role=\"master\" Copy If the master nodes follow the labeling convention defined in the discovery of master nodes and control plane components documentation section, you should get some output like: NAME STATUS ROLES AGE VERSION ip-10-42-24-4.ec2.internal Ready master 42d v1.14.8 Copy If no nodes are found, there are two scenarios: Your master nodes don’t have the required labels that identify them as masters; in this case, you need to add both labels to your master nodes. You’re in a managed cluster and your provider is handling the master nodes for you. In this case there is nothing you can do, since your provider is limiting access to those nodes. Check that the integration is running on the master nodes Replace the placeholder in the following command with one of the node names returned in the previous step to get an integration pod running on a master node: kubectl get pods --field-selector spec.nodeName=NODE_NAME -l name=newrelic-infra --all-namespaces Copy The next command is the same, except that it selects the node for you: kubectl get pods --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra --all-namespaces Copy If everything is correct, you should get some output like: NAME READY STATUS RESTARTS AGE newrelic-infra-whvzt 1/1 Running 0 6d20h Copy If the integration is not running on your master nodes, check that the daemonset has all the desired instances running and ready. kubectl get daemonsets -l app=newrelic-infra --all-namespaces Copy Check that the control plane components have the required labels Refer to the discovery of master nodes and control plane components documentation section and look for the labels the integration uses to discover the components. Then run the following commands to see if there are any pods with such labels and the nodes where they are running: kubectl get pods -l k8s-app=kube-apiserver --all-namespaces Copy If there is a component with the given label, you should see something like: NAMESPACE NAME READY STATUS RESTARTS AGE kube-system kube-apiserver-ip-10-42-24-42.ec2.internal 1/1 Running 3 49d Copy The same should be done with the rest of the components: kubectl get pods -l k8s-app=etcd-manager-main --all-namespaces Copy kubectl get pods -l k8s-app=kube-scheduler --all-namespaces Copy kubectl get pods -l k8s-app=kube-kube-controller-manager --all-namespaces Copy Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs To retrieve the logs, follow the instructions on get logs from pod running on a master node. The integration logs the following message for every component: “Running job: COMPONENT_NAME”. For example: Running job: scheduler Copy Running job: etcd Copy Running job: controller-manager Copy Running job: api-server Copy If you didn’t specify the ETCD_TLS_SECRET_NAME configuration option, you’ll find the following message in the logs: Skipping job creation for component etcd: etcd requires TLS configuration, none given Copy If any error occurs while querying the metrics of any component, it will be logged after the Running job message.
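As a quick way to scan for these messages, assuming you have already identified an integration pod running on a master node as described above (POD_NAME is a placeholder), you can filter its logs directly: kubectl logs POD_NAME | grep -E 'Running job|Skipping job' Copy If you also need the error lines that follow a match, grep's -A flag (for example, -A 5) prints the subsequent lines of context.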
Manually query the metrics of the components Refer to the discovery of master nodes and control plane components documentation section to get the endpoint of the control plane component you want to query. With the endpoint, we can use the integration pod that’s running on the same node as the component to query it. The following are examples of how to query the Kubernetes scheduler: kubectl exec -ti POD_NAME -- wget -O - localhost:10251/metrics Copy The following command does the same, but also chooses the pod for you: kubectl exec -ti $(kubectl get pods --all-namespaces --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra -o jsonpath=\"{.items[0].metadata.name}\") -- wget -O - localhost:10251/metrics Copy If everything is correct, you should get some metrics in the Prometheus format, something like: Connecting to localhost:10251 (127.0.0.1:10251) # HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend. # TYPE apiserver_audit_event_total counter apiserver_audit_event_total 0 # HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend. # TYPE apiserver_audit_requests_rejected_total counter apiserver_audit_requests_rejected_total 0 # HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request. # TYPE apiserver_client_certificate_expiration_seconds histogram apiserver_client_certificate_expiration_seconds_bucket{le=\"0\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"1800\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"3600\"} 0 Copy", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 222.86055, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Not seeing control plane data", + "sections": "Not seeing control plane data", + "tags": "Kubernetes integration", + "body": " of master nodes and control plane components documentation section and look for the labels the integration uses to discover the components.
Then run the following commands to see if there are any pods with such labels and the nodes where they are running: kubectl get pods -l k8s-app=kube-apiserver --all" + }, + "id": "617daf22196a67f585f7e101" + }, { "sections": [ "Kubernetes integration: what's changed in v3", @@ -14612,7 +14577,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 233.80714, + "_score": 222.21045, "_version": null, "_explanation": null, "sort": null, @@ -14626,44 +14591,66 @@ }, { "sections": [ - "Not seeing control plane data", - "Problem", - "Solution", - "Check that the master nodes have the correct labels", - "Check that the integration is running on the master nodes", - "Check that the control plane components have the required labels", - "Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs", - "Manually query the metrics of the components" + "Find and use your Kubernetes data", + "Query Kubernetes data", + "Event types", + "Manage alerts", + "Create an alert condition", + "Use the predefined alert types and thresholds", + "Select alert notifications", + "Pod alert notification example", + "Container resource notification example", + "Create alert conditions using NRQL", + "Kubernetes attributes and metrics", + "Node data", + "Namespace data", + "Deployment data", + "ReplicaSet data", + "DaemonSet data", + "StatefulSet data", + "Pod data", + "Cluster data", + "Container data", + "Volume data", + "API server data", + "Controller manager data", + "Scheduler data", + "etcd data", + "Endpoint data", + "Service data", + "Horizontal Pod Autoscaler data", + "Kubernetes metadata in APM-monitored applications", + "For more help" ], - "title": "Not seeing control plane data", + "title": "Find and use your Kubernetes data", "type": "docs", "tags": [ - "Troubleshooting", + "Understand and use data", "Kubernetes integration", "Integrations" ], - "external_id": "02edaca82526773fcaef4adb97825d2349a404b4", + "external_id": "636617521998343c5bb96b0500843229b9263712", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/troubleshooting/not-seeing-control-plane-data/", - "published_at": "2022-02-15T20:58:20Z", - "updated_at": "2021-10-24T03:07:45Z", - "document_type": "troubleshooting_doc", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/understand-use-data/find-use-your-kubernetes-data/", + "published_at": "2022-02-15T21:16:28Z", + "updated_at": "2022-02-04T12:24:52Z", + "document_type": "page", "popularity": 1, - "body": "Problem You have completed the installation procedure for New Relic's Kubernetes integration, you are seeing Kubernetes data in your New Relic account but there is no data from any of the control plane components. Solution Check that the master nodes have the correct labels Execute the following commands to manually find the master nodes: kubectl get nodes -l node-role.kubernetes.io/master=\"\" Copy kubectl get nodes -l kubernetes.io/role=\"master\" Copy If the master nodes follow the labeling convention defined in the discovery of master nodes and control plane components documentation section, you should get some output like: NAME STATUS ROLES AGE VERSION ip-10-42-24-4.ec2.internal Ready master 42d v1.14.8 Copy If no nodes are found, there are two scenarios: Your master nodes don’t have the required labels that identify them as masters, in this case you need to add both labels to your master nodes. 
You’re in a managed cluster and your provider is handling the master nodes for you. In this case there is nothing you can do, since your provider is limiting the access to those nodes. Check that the integration is running on the master nodes Replace the placeholder in the following command with one of the node names returned in the previous step to get an integration pod running on a master node: kubectl get pods --field-selector spec.nodeName=NODE_NAME -l name=newrelic-infra --all-namespaces Copy The next command is the same, just that it selects the node for you: kubectl get pods --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra --all-namespaces Copy If everything is correct you should get some output like: NAME READY STATUS RESTARTS AGE newrelic-infra-whvzt 1/1 Running 0 6d20h Copy If the integration is not running on your master nodes, check that the daemonset has all the desired instances running and ready. kubectl get daemonsets -l app=newrelic-infra --all-namespaces Copy Check that the control plane components have the required labels Refer to the discovery of master nodes and control plane components documentation section and look for the labels the integration uses to discover the components. Then run the following commands to see if there are any pods with such labels and the nodes where they are running: kubectl get pods -l k8s-app=kube-apiserver --all-namespaces Copy If there is component with the given label you should see something like: NAMESPACE NAME READY STATUS RESTARTS AGE kube-system kube-apiserver-ip-10-42-24-42.ec2.internal 1/1 Running 3 49d Copy The same should be done with the rest of the components: kubectl get pods -l k8s-app=etcd-manager-main --all-namespaces Copy kubectl get pods -l k8s-app=kube-scheduler --all-namespaces Copy kubectl get pods -l k8s-app=kube-kube-controller-manager --all-namespaces Copy Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs To retrieve the logs, follow the instructions on get logs from pod running on a master node. The integration logs for every component the following message “Running job: COMPONENT_NAME”. Ex: Running job: scheduler Copy Running job: etcd Copy Running job: controller-manager Copy Running job: api-server Copy If you didn’t specify the ETCD_TLS_SECRET_NAME configuration option you’ll find the following message in the logs: Skipping job creation for component etcd: etcd requires TLS configuration, none given Copy If any error occurs while querying the metrics of any component it will be logged after the Running job message. Manually query the metrics of the components Refer to the discovery of master nodes and control plane components documentation section to get the endpoint of the control plane component you want to query. With the endpoint we can use the integration pod that’s running on the same node as the component to query. 
The following are examples on how to query the Kubernetes scheduler: kubectl exec -ti POD_NAME -- wget -O - localhost:10251/metrics Copy The following command does the same, but also chooses the pod for you: kubectl exec -ti $(kubectl get pods --all-namespaces --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra -o jsonpath=\"{.items[0].metadata.name}\") -- wget -O - localhost:10251/metrics Copy If everything is correct you should get some metrics on the Prometheus format, something like: Connecting to localhost:10251 (127.0.0.1:10251) # HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend. # TYPE apiserver_audit_event_total counter apiserver_audit_event_total 0 # HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend. # TYPE apiserver_audit_requests_rejected_total counter apiserver_audit_requests_rejected_total 0 # HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request. # TYPE apiserver_client_certificate_expiration_seconds histogram apiserver_client_certificate_expiration_seconds_bucket{le=\"0\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"1800\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"3600\"} 0 Copy", + "body": "You can build your own charts and query all your Kubernetes integration data using the query builder and the NerdGraph API. Our integration collects Kubernetes data by instrumenting the container orchestration layer. For a simpler and more visual experience, use the cluster explorer. one.newrelic.com > Dashboards: Using the query builder you can query your Kubernetes data and create clear visualizations. Query Kubernetes data The simplest way to query your Kubernetes data is using the query builder, which accepts NRQL queries. Alternatively, you can use the NerdGraph API to retrieve Kubernetes data. Event types Kubernetes data is attached to the following event types: Event name Type of Kubernetes data Available since K8sNodeSample Node data v1.0.0 K8sNamespaceSample Namespace data v1.0.0 K8sDeploymentSample Deployment data v1.0.0 K8sReplicasetSample ReplicaSet data v1.0.0 K8sDaemonsetSample DaemonSet data v1.13.0 K8sStatefulsetSample StatefulSet data v1.13.0 K8sPodSample Pod data v1.0.0 K8sClusterSample Cluster data v1.0.0 K8sContainerSample Container data v1.0.0 K8sVolumeSample Volume data v1.0.0 K8sApiServerSample API server data v1.11.0 K8sControllerManagerSample Controller manager data v1.11.0 K8sSchedulerSample Scheduler data v1.11.0 K8sEtcdSample etcd data v1.11.0 K8sEndpointSample Endpoint data v1.13.0 K8sServiceSample Service data v1.13.0 K8sHpaSample Horizontal Pod Autoscaler data v2.3.0 Manage alerts You can be notified about alert violations for your Kubernetes data: Create an alert condition To create an alert condition for the Kubernetes integration: Go to one.newrelic.com > Infrastructure > Settings > Alerts > Kubernetes, then select Create alert condition. To filter the alert to Kubernetes entities that only have the chosen attributes, select Filter. Select the threshold settings. For more on the Trigger an alert when... options, see Alert types. Select an existing alert policy, or create a new one. Select Create. 
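Both the query builder and alerting paths above read from the same event types; for scripted access, you can run the same NRQL through the NerdGraph API. A minimal sketch, assuming a valid user API key in NEW_RELIC_API_KEY and with YOUR_ACCOUNT_ID as a placeholder; the event and attribute names come from the attribute tables that follow:

# Run a NRQL query against Kubernetes data via NerdGraph.
# NEW_RELIC_API_KEY and YOUR_ACCOUNT_ID are placeholders, not values from this document.
curl -s https://api.newrelic.com/graphql \
  -H 'Content-Type: application/json' \
  -H "API-Key: $NEW_RELIC_API_KEY" \
  -d '{"query":"{ actor { account(id: YOUR_ACCOUNT_ID) { nrql(query: \"SELECT latest(podsMissing) FROM K8sDeploymentSample FACET deploymentName\") { results } } } }"}'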
When an alert condition's threshold is triggered, New Relic sends a notification to the policy's notification channels. Use the predefined alert types and thresholds The Kubernetes integration comes with its own alert policy and alert conditions. To see what the predefined alert conditions are, see Kubernetes integration: Predefined alert policy. In addition, you can create an alert condition for any metric collected by any New Relic integration you use, including the Kubernetes integration: Select the alert type Integrations. From the Select a data source dropdown, select a Kubernetes (K8s) data source. Select alert notifications When an alert condition's threshold is triggered, New Relic sends a message to the notification channel(s) chosen in the alert policy. Depending on the type of notification, you may have the following options: View the incident. Acknowledge the incident. Go to a chart of the incident data by selecting the identifier name. The entity identifier that triggered the alert appears near the top of the notification message. The format of the identifier depends on the alert type: Available pods are less than desired pods alerts: K8s:CLUSTER_NAME:PARENT_NAMESPACE:replicaset:REPLICASET_NAME Copy CPU or memory usage alerts: K8s:CLUSTER_NAME:PARENT_NAMESPACE:POD_NAME:container:CONTAINER_NAME Copy Here are some examples. Pod alert notification example For Available pods are less than desired pods alerts, the ID of the ReplicaSet triggering the issue might look like this: k8s:beam-production:default:replicaset:nginx-deployment-1623441481 Copy This identifier contains the following information: Cluster name: beam-production Parent namespace: default ReplicaSet name: nginx-deployment-1623441481 Container resource notification example For container CPU or memory usage alerts, the entity might look like this: k8s:beam-production:kube-system:kube-state-metrics-797bb87c75-zncwn:container:kube-state-metrics Copy This identifier contains the following information: Cluster name: beam-production Parent namespace: kube-system Pod name: kube-state-metrics-797bb87c75-zncwn Container name: kube-state-metrics Create alert conditions using NRQL Follow standard procedures to create alert conditions for NRQL queries. Kubernetes attributes and metrics The Kubernetes integration collects the following metrics and other attributes. Node data Query the K8sNodeSample event for node data: Node attribute Description allocatableCpuCores Node allocatable CPU cores allocatableMemoryBytes Node allocatable memory bytes allocatablePods Node allocatable pods allocatableEphemeralStorageBytes Node allocatable ephemeral-storage bytes capacityCpuCores Node CPU capacity capacityMemoryBytes Node memory capacity (in bytes) capacityPods Pod capacity of the node capacityEphemeralStorageBytes Node ephemeral-storage capacity clusterName Name that you assigned to the cluster when you installed the Kubernetes integration condition.{conditionName}={conditionValue} Status of the current observed node condition. The reported conditions can vary depending on your Kubernetes flavor and installed operators. Examples of common conditions are: Ready, DiskPressure, MemoryPressure, PIDPressure and NetworkUnavailable. Condition values can be 1 (true), 0 (false), or -1 (unknown). 
cpuUsedCoreMilliseconds Node CPU usage measured in core milliseconds cpuUsedCores Node CPU usage measured in cores cpuRequestedCores Total amount of CPU cores requested allocatableCpuCoresUtilization Percentage of CPU cores actually used with respect to the CPU cores allocatable fsAvailableBytes Bytes available in the node filesystem fsCapacityBytes Total capacity of the node filesystem in bytes fsInodes Total number of inodes in the node filesystem fsInodesFree Free inodes in the node filesystem fsInodesUsed Used inodes in the node filesystem fsUsedBytes Used bytes in the node filesystem fsCapacityUtilization Percentage of used bytes in the node filesystem with respect to the capacity memoryAvailableBytes Bytes of memory available in the node memoryMajorPageFaultsPerSecond Number of major page faults per second in the node memoryPageFaults Number of page faults in the node memoryRssBytes Bytes of rss memory memoryUsedBytes Bytes of memory used memoryWorkingSetBytes Bytes of memory in the working set memoryRequestedBytes Total amount of requested memory allocatableMemoryUtilization Percentage of bytes of memory in the working set with respect to the node allocatable memory net.errorCountPerSecond Number of errors per second while receiving/transmitting over the network nodeName Host name that the pod is running on runtimeAvailableBytes Bytes available to the container runtime filesystem runtimeCapacityBytes Total capacity assigned to the container runtime filesystem in bytes runtimeInodes Total number of inodes in the container runtime filesystem runtimeInodesFree Free inodes in the container runtime filesystem runtimeInodesUsed Used inodes in the container runtime filesystem runtimeUsedBytes Used bytes in the container runtime filesystem unschedulable Status of node schedulability of new pods. Its value can be 0 (false) or 1 (true) label.LABEL_NAME Labels associated with your node, so you can filter and query for specific nodes Namespace data Query the K8sNamespaceSample event for namespace data: Namespace attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of the namespace when it was created namespace Name of the namespace to be used as an identifier label.LABEL_NAME Labels associated with your namespace, so you can filter and query for specific namespaces status Current status of the namespace. 
The value can be Active or Terminated Deployment data Query the K8sDeploymentSample event for deployment data: Deployment attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of when the deployment was created deploymentName Name of the deployment to be used as an identifier namespace Name of the namespace that the deployment belongs to label.LABEL_NAME Labels associated with your deployment, so you can filter and query for specific deployments podsAvailable Number of replicas that are currently available podsDesired Number of replicas that you defined in the deployment podsTotal Total number of replicas that are currently running podsUnavailable Number of replicas that are currently unavailable podsUpdated Number of replicas that have been updated to achieve the desired state of the deployment podsMissing Total number of replicas that are missing (number of desired replicas, podsDesired, minus the total number of replicas, podsTotal) ReplicaSet data Query the K8sReplicasetSample event for ReplicaSet data: Replica attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of when the ReplicaSet was created deploymentName Name of the deployment to be used as an identifier namespace Name of the namespace that the ReplicaSet belongs to observedGeneration Integer representing generation observed by the ReplicaSet podsDesired Number of replicas that you defined in the deployment podsFullyLabeled Number of pods that have labels that match the ReplicaSet pod template labels podsReady Number of replicas that are ready for this ReplicaSet podsTotal Total number of replicas that are currently running podsMissing Total number of replicas that are currently missing (number of desired replicas, podsDesired, minus the number of ready replicas, podsReady) replicasetName Name of the ReplicaSet to be used as an identifier DaemonSet data Query the K8sDaemonsetSample event for DaemonSet data: DaemonSet attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of when the DaemonSet was created namespaceName Name of the namespace that the DaemonSet belongs to label.LABEL_NAME Labels associated with your DaemonSet, so you can filter and query for specific DaemonSet daemonsetName Name associated with the DaemonSet podsDesired The number of nodes that should be running the daemon pod podsScheduled The number of nodes running at least one daemon pod and are supposed to podsAvailable The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available podsReady The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready podsUnavailable The number of nodes that should be running the daemon pod and have none of the daemon pod running and available podsMisscheduled The number of nodes running a daemon pod but are not supposed to podsUpdatedScheduled The total number of nodes that are running updated daemon pod podsMissing Total number of replicas that are currently missing (number of desired replicas, podsDesired, minus the number of ready replicas, podsReady) metadataGeneration Sequence number representing a specific generation of the desired state StatefulSet data Query the K8sStatefulsetSample event for StatefulSet data: StatefulSet attribute Description clusterName Name 
that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of when the StatefulSet was created namespaceName Name of the namespace that the StatefulSet belongs to label.LABEL_NAME Labels associated with your StatefulSet, so you can filter and query for specific StatefulSet statefulsetName Name associated with the StatefulSet podsDesired Number of desired pods for a StatefulSet podsReady The number of ready replicas per StatefulSet podsCurrent The number of current replicas per StatefulSet podsTotal The number of replicas per StatefulSet podsUpdated The number of updated replicas per StatefulSet podsMissing Total number of replicas that are currently missing (number of desired replicas, podsDesired, minus the number of ready replicas, podsReady) observedGeneration The generation observed by the StatefulSet controller metadataGeneration Sequence number representing a specific generation of the desired state for the StatefulSet currentRevision Indicates the version of the StatefulSet used to generate pods in the sequence. Value range: between 0 and podsCurrent updateRevision Indicates the version of the StatefulSet used to generate pods in the sequence. Value range: between podsDesired-podsUpdated and podsDesired Pod data Query the K8sPodSample event for pod data: Pod attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of when the pod was created in epoch seconds createdBy Name of the Kubernetes object that created the pod. For example, newrelic-infra createdKind Kind of Kubernetes object that created the pod. For example, DaemonSet. deploymentName Name of the deployment to be used as an identifier isReady Boolean representing whether or not the pod is ready to serve requests isScheduled Boolean representing whether or not the pod has been scheduled to run on a node label.LABEL_NAME Labels associated with your pod, so you can filter and query for specific pods message Details related to the last pod status change namespace Name of the namespace that the pod belongs to net.errorCountPerSecond Number of errors per second while receiving/transmitting over the network net.errorsPerSecond Number of errors per second net.rxBytesPerSecond Number of bytes per second received over the network net.txBytesPerSecond Number of bytes per second transmitted over the network nodeIP Host IP address that the pod is running on nodeName Host name that the pod is running on podIP IP address of the pod. If it doesn't have an IP, it'll be empty podName Name of the pod to be used as an identifier reason Reason why the pod is in the current status startTime Timestamp of when the pod started running in epoch seconds status Current status of the pod. Value can be Pending, Running, Succeeded, Failed, Unknown Cluster data Query the K8sClusterSample event to see cluster data: Cluster attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration clusterK8sVersion Kubernetes version that the cluster is running Container data Query the K8sContainerSample event for container data: Container attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration containerID Unique ID associated with the container. 
If you are running Docker, this is the Docker container id containerImage Name of the image that the container is running containerImageID Unique ID associated with the image that the container is running containerName Name associated with the container cpuLimitCores Integer representing limit CPU cores defined for the container in the pod specification cpuRequestedCores Requested CPU cores defined for the container in the pod specification cpuUsedCores CPU cores actually used by the container cpuCoresUtilization Percentage of CPU cores actually used by the container with respect to the CPU limit specified. This percentage is based on this calculation: (cpuUsedCores / cpuLimitCores) * 100 requestedCpuCoresUtilization Percentage of CPU cores actually used by the container with respect to the CPU request specified deploymentName Name of the deployment to be used as an identifier isReady Boolean. Whether or not the container's readiness check succeeded label.LABEL_NAME Labels associated with your container, so you can filter and query for specific containers memoryLimitBytes Integer representing limit bytes of memory defined for the container in the pod specification memoryRequestedBytes Integer. Requested bytes of memory defined for the container in the pod specification memoryUsedBytes Integer. Bytes of memory actually used by the container memoryUtilization Percentage of memory actually used by the container with respect to the memory limit specified requestedMemoryUtilization Percentage of memory actually used by the container with respect to the memory request specified memoryWorkingSetBytes Integer. Bytes of memory in the working set memoryWorkingSetUtilization Percentage of working set memory actually used by the container with respect to the memory limit specified requestedMemoryWorkingSetUtilization Percentage of working set memory actually used by the container with respect to the memory request specified namespace Name of the namespace that the container belongs to nodeIP Host IP address the container is running on nodeName Host name that the container is running on podName Name of the pod that the container is in, to be used as an identifier reason Provides a reason why the container is in the current status restartCount Number of times the container has been restarted status Current status of the container. Value can be Running, Terminated, or Unknown containerCpuCfsPeriodsDelta Delta change of elapsed enforcement period intervals containerCpuCfsThrottledPeriodsDelta Delta change of throttled period intervals containerCpuCfsThrottledSecondsDelta Delta change of duration the container has been throttled, in seconds containerCpuCfsPeriodsTotal Total number of elapsed enforcement period intervals containerCpuCfsThrottledPeriodsTotal Total number of throttled period intervals containerCpuCfsThrottledSecondsTotal Total time duration the container has been throttled, in seconds containerMemoryMappedFileBytes Total size of memory mapped files used by this container, in bytes Volume data Query the K8sVolumeSample event for volume data: Volume attribute Description volumeName Name that you assigned to the volume at creation clusterName Cluster where the volume is configured namespace Namespace where the volume is configured podName The pod that the volume is attached to. 
The Kubernetes monitoring integration lists Volumes that are attached to a pod persistent If this is a persistent volume, this value is set to true pvcNamespace Namespace where the Persistent Volume Claim is configured pvcName Name that you assigned to the Persistent Volume Claim at creation fsCapacityBytes Capacity of the volume, in bytes fsUsedBytes Usage of the volume, in bytes fsAvailableBytes Capacity available of the volume, in bytes fsUsedPercent Usage of the volume in percentage fsInodes Total inodes of the volume fsInodesUsed inodes used in the volume fsInodesFree inodes available in the volume Volume data is available for volume plugins that implement the MetricsProvider interface: AWSElasticBlockStore AzureDisk AzureFile Cinder Flexvolume Flocker GCEPersistentDisk GlusterFS iSCSI NFS StorageOS VsphereVolume API server data Query the K8sApiServerSample event to see API Server data. For more information, see Configure control plane monitoring: API server attribute Description processResidentMemoryBytes Resident memory size, in bytes processCpuSecondsDelta Difference of the user and system CPU time spent, in seconds goThreads Number of OS threads created goGoroutines Number of goroutines that currently exist apiserverRequestDelta_verb_VERB_code_CODE Difference of the number of apiserver requests, broken out for each verb and HTTP response code apiserverRequestRate_verb_VERB_code_CODE Rate of apiserver requests, broken out for each verb and HTTP response code restClientRequestsDelta_code_CODE_method_METHOD Difference of the number of HTTP requests, partitioned by method and code restClientRequestsRate_code_CODE_method_METHOD Rate of the number of HTTP requests, partitioned by method and code etcdObjectCounts_resource_RESOURCE-KIND Number of stored objects at the time of last check, split by kind Controller manager data Query the K8sControllerManagerSample event to see Controller manager data. For more information, see Configure control plane monitoring: Controller manager attribute Description processResidentMemoryBytes Resident memory size, in bytes processCpuSecondsDelta Difference of the user and system CPU time spent in seconds goThreads Number of OS threads created goGoroutines Number of goroutines that currently exist workqueueAddsDelta_name_WORK-QUEUE-NAME Difference of the total number of adds handled by workqueue workqueueDepth_name_WORK-QUEUE-NAME Current depth of workqueue workqueueRetriesDelta_name_WORK-QUEUE-NAME Difference of the total number of retries handled by workqueue leaderElectionMasterStatus Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master Scheduler data Query the K8sSchedulerSample event to see Scheduler data. 
For more information, see Configure control plane monitoring: Scheduler attribute Description processResidentMemoryBytes Resident memory size, in bytes processCpuSecondsDelta Difference of the user and system CPU time spent in seconds goThreads Number of OS threads created goGoroutines Number of goroutines that currently exist leaderElectionMasterStatus Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master httpRequestDurationMicroseconds_handler_HANDLER_quantile_QUANTILE The HTTP request latencies in microseconds, per quantile httpRequestDurationMicroseconds_handler_HANDLER_sum The sum of the HTTP request latencies, in microseconds httpRequestDurationMicroseconds_handler_HANDLER_count The number of observed HTTP requests events restClientRequestsDelta_code_CODE_host_HOST_method_METHOD Difference of the number of HTTP requests, partitioned by status code, method, and host restClientRequestsRate_code_CODE_host_HOST_method_METHOD Rate of the number of HTTP requests, partitioned by status code, method, and host schedulerScheduleAttemptsDelta_result_RESULT Difference of the number of attempts to schedule pods, by the result. unschedulable means a pod could not be scheduled, while error means an internal scheduler problem schedulerScheduleAttemptsRate_result_RESULT Rate of the number of attempts to schedule pods, by the result. unschedulable means a pod could not be scheduled, while error means an internal scheduler problem schedulerSchedulingDurationSeconds_operation_OPERATION_quantile_QUANTILE Scheduling latency in seconds split by sub-parts of the scheduling operation schedulerSchedulingDurationSeconds_operation_OPERATION_sum The sum of scheduling latency in seconds split by sub-parts of the scheduling operation schedulerSchedulingDurationSeconds_operation_OPERATION_count The number of observed events of schedulings split by sub-parts of the scheduling operation. schedulerPreemptionAttemptsDelta Difference of the total preemption attempts in the cluster till now schedulerPodPreemptionVictims Number of selected preemption victims etcd data Query the K8sEtcdSample event to see etcd data. For more information, see Configure control plane monitoring: etcd attribute Description processResidentMemoryBytes Resident memory size, in bytes processCpuSecondsDelta Difference of the user and system CPU time spent in seconds goThreads Number of OS threads created goGoroutines Number of goroutines that currently exist etcdServerHasLeader Whether or not a leader exists. 
1 is existence, 0 is not etcdServerLeaderChangesSeenDelta Difference of the number of leader changes seen etcdMvccDbTotalSizeInBytes Total size of the underlying database physically allocated, in bytes etcdServerProposalsCommittedDelta Difference of the total number of consensus proposals committed etcdServerProposalsCommittedRate Rate of the total number of consensus proposals committed etcdServerProposalsAppliedDelta Difference of the total number of consensus proposals applied etcdServerProposalsAppliedRate Rate of the total number of consensus proposals applied etcdServerProposalsPending The current number of pending proposals to commit etcdServerProposalsFailedDelta Difference of the total number of failed proposals seen etcdServerProposalsFailedRate Rate of the total number of failed proposals seen processOpenFds Number of open file descriptors processMaxFds Maximum number of open file descriptors processFdsUtilization Percentage of open file descriptors with respect to the maximum number that can be opened etcdNetworkClientGrpcReceivedBytesRate Rate of the total number of bytes received from gRPC clients etcdNetworkClientGrpcSentBytesRate Rate of the total number of bytes sent to gRPC clients Endpoint data Query the K8sEndpointSample event for endpoint data: Endpoint attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of when the endpoint was created namespaceName Name of the namespace that the endpoint belongs to endpointName Name associated with the endpoint label.LABEL_NAME Labels associated with your endpoint, so you can filter and query for specific endpoints addressAvailable Number of addresses available in endpoint addressNotReady Number of addresses not ready in endpoint Service data Query the K8sServiceSample event for service data: Service attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration createdAt Timestamp of when the service was created namespaceName Name of the namespace that the service belongs to label.LABEL_NAME Labels associated with your service, so you can filter and query for specific services serviceName Name associated with the service loadBalancerIP The IP of the external load balancer, if specType is LoadBalancer. 
externalName The external name value, if specType is ExternalName clusterIP The internal cluster IP, if specType is ClusterIP specType Type of the service selector.LABEL_NAME The label selector that this service targets Horizontal Pod Autoscaler data Query the K8sHpaSample event for Horizontal Pod Autoscaler data: HPA attribute Description clusterName Name that you assigned to the cluster when you installed the Kubernetes integration label.LABEL_NAME Labels associated with your HPA, so you can filter and query for specific autoscalers currentReplicas Current number of replicas of pods managed by this autoscaler desiredReplicas Desired number of replicas of pods managed by this autoscaler minReplicas Lower limit for the number of pods that can be set by the autoscaler, 1 by default maxReplicas Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than minReplicas targetMetric The metric specifications used by this autoscaler when calculating the desired replica count isAble Boolean representing whether or not the autoscaler is able to fetch and update scales, as well as whether or not any backoff-related conditions would prevent scaling isActive Boolean representing whether or not the autoscaler is enabled (if it's able to calculate the desired scales) isLimited Boolean representing whether or not the autoscaler is capped, either up or down, by the maximum or minimum replicas configured labels Number of Kubernetes labels converted to Prometheus labels metadataGeneration The generation observed by the HorizontalPodAutoscaler controller Kubernetes metadata in APM-monitored applications By linking your applications with Kubernetes, the following attributes are added to application traces and distributed traces: nodeName containerName podName clusterName deploymentName namespaceName For more help", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 223.23756, + "_score": 212.14972, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Not seeing control plane data", - "sections": "Not seeing control plane data", + "title": "Find and use your Kubernetes data", + "sections": "Find and use your Kubernetes data", "tags": "Kubernetes integration", - "body": " of master nodes and control plane components documentation section and look for the labels the integration uses to discover the components. Then run the following commands to see if there are any pods with such labels and the nodes where they are running: kubectl get pods -l k8s-app=kube-apiserver --all" + "body": " Relic integration you use, including the Kubernetes integration: Select the alert type Integrations. From the Select a data source dropdown, select a Kubernetes (K8s) data source. 
Select alert notifications When an alert condition's threshold is triggered, New Relic sends a message to the notification" }, - "id": "617daf22196a67f585f7e101" + "id": "617d58a9196a6775cbf7c43d" } ], "/ads-web-gpt/a29dc26d-a05e-493e-8b97-eb9c2d90b763": [], @@ -14693,7 +14680,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 162.41895, + "_score": 159.99179, "_version": null, "_explanation": null, "sort": null, @@ -14740,7 +14727,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 131.77281, + "_score": 130.72707, "_version": null, "_explanation": null, "sort": null, @@ -14767,7 +14754,7 @@ "external_id": "92138b3846dabdae20d88c102dcac8e575502ad1", "image": "https://developer.newrelic.com/static/65a2aca8a0e6d0d1b808c2cc98519def/0086b/UC2-sec2-query.png", "url": "https://developer.newrelic.com/collect-data/custom-events/", - "published_at": "2022-02-15T01:38:13Z", + "published_at": "2022-02-16T01:38:29Z", "updated_at": "2020-12-04T01:52:23Z", "document_type": "page", "popularity": 1, @@ -14775,7 +14762,7 @@ "body": "Measure what you need by creating your own event types. Whereas adding custom attributes adds metadata to an existing event, a custom event creates an entirely new event type. Create custom events to define, visualize, and get alerts on additional data, just as you would with any data we provide from our core agents. Custom events can be inserted through the Agent APIs or directly via the Insights Insert API. The following example shows how to send a custom event named CLIRun that tracks when a command line tool written in Ruby has its process exit due to an exception. # Hook into the runtime 'at_exit' event at_exit do # Name the custom event payload = { 'eventType' => 'CLIRun' } # Check to see if the process is exiting due to an error if $!.nil? || $!.is_a?(SystemExit) && $!.success? payload[:status] = 0 else # Gather any known errors errors = \"\" (Thread.current[:errors] ||= []).each do |err| errors += \"#{err}\\n\" end payload[:errors] = errors end # Send the errors to New Relic as a custom event insights_url = URI.parse(\"https://insights-collector.newrelic.com/v1/accounts/YOUR_ACCOUNT_ID/events\") headers = { \"x-insert-key\" => \"YOUR_API_KEY\", \"content-type\" => \"application/json\" } http = Net::HTTP.new(insights_url.host, insights_url.port) http.use_ssl = true request = Net::HTTP::Post.new(insights_url.request_uri, headers) request.body = payload.to_json puts \"Sending run summary to Insights: #{payload.to_json}\" begin response = http.request(request) puts \"Response from Insights: #{response.body}\" rescue Exception => e puts \"There was an error posting to Insights. Error: #{e.inspect}\" end end Copy Here, a NRQL query retrieves information about the custom event, and the result can be added to a dashboard. 
SELECT count(*) FROM CLIRun FACET errors SINCE 1 week ago Copy Learn more about custom events.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 123.732086, + "_score": 122.819595, "_version": null, "_explanation": null, "sort": null, @@ -14821,7 +14808,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 117.92382, + "_score": 117.00055, "_version": null, "_explanation": null, "sort": null, @@ -14863,7 +14850,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 117.548256, + "_score": 116.61559, "_version": null, "_explanation": null, "sort": null, @@ -14913,7 +14900,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.28802, + "_score": 265.12744, "_version": null, "_explanation": null, "sort": null, @@ -14966,7 +14953,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 204.24988, + "_score": 190.33984, "_version": null, "_explanation": null, "sort": null, @@ -15000,7 +14987,7 @@ "external_id": "509f5fb8490b652df4c6f31ec06b403c3393530e", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elb-classic-monitoring-integration/", - "published_at": "2022-02-14T11:39:18Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-14T11:39:18Z", "document_type": "page", "popularity": 1, @@ -15008,7 +14995,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 192.89095, + "_score": 181.45224, "_version": null, "_explanation": null, "sort": null, @@ -15053,7 +15040,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 181.02737, + "_score": 170.70715, "_version": null, "_explanation": null, "sort": null, @@ -15103,7 +15090,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 167.1589, + "_score": 157.03575, "_version": null, "_explanation": null, "sort": null, @@ -15156,7 +15143,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 198.24617, + "_score": 188.20534, "_version": null, "_explanation": null, "sort": null, @@ -15197,7 +15184,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 197.51288, + "_score": 187.60748, "_version": null, "_explanation": null, "sort": null, @@ -15235,7 +15222,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 197.2091, + "_score": 187.35959, "_version": null, "_explanation": null, "sort": null, @@ -15286,7 +15273,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 178.43439, + "_score": 169.62315, "_version": null, "_explanation": null, "sort": null, @@ -15333,7 +15320,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.4935, + "_score": 168.70702, "_version": null, "_explanation": null, "sort": null, @@ -15372,7 +15359,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1259.9567, + "_score": 1262.2787, "_version": null, "_explanation": null, "sort": null, @@ -15391,7 +15378,7 @@ "Tip", "Important" ], - "published_at": "2022-02-15T01:41:58Z", + "published_at": "2022-02-16T01:44:29Z", "title": 
"Instrument your cluster", "updated_at": "2021-11-06T01:49:38Z", "type": "developer", @@ -15402,7 +15389,7 @@ "body": "lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. Each procedure in the lab builds upon the last, so make sure you've completed the last procedure, Explore your cluster, before starting this one. As the developer of TinyHat.me, you need to have visibility into your cluster. You need to know how healthy your application is. You need to know when things go wrong. But you've put it off for so long because instrumenting Kubernetes is hard and time-consuming. This is one of the things that makes Pixie so valuable. Pixie is a CNCF open source Kubernetes monitoring solution that provides: Automatic and instant baseline observability of your cluster Actionable, code-level insights of your applications With Pixie's auto-telemetry, you'll instrument your cluster in minutes to get dynamic data such as protocol traces, resource metrics, and app metrics from your cluster—all without an agent! Tip If you haven't signed into New Relic, do that now so you're ready to install Pixie into your cluster. Step 1 of 10 Open New Relic. On the right side of the upper navigation bar, click Add more data: Step 2 of 10 Click Guided install: This walks you through the installation process. Step 3 of 10 Click Kubernetes to let New Relic guide you through instrumenting your Kubernetes cluster: Step 4 of 10 Click Begin installation: Step 5 of 10 Select your account, name your cluster \"tiny-hat\", and click Continue: This specifies that TinyHat.me, and all its services, should live in a New Relic cluster called \"tiny-hat\" in the account you selected. Step 6 of 10 Leave the default choices on the next screen. These provide a range of observability features for your cluster, including our infrastructure agent which gives you a high level overview of the health of your cluster. Notably, the default options include \"Instant service-level insights, full-body requests, and application profiles through Pixie\" which you focus on in this lab. Click Continue: In the next screen, you see a command for installing our Kubernetes integration into your cluster. Step 7 of 10 Click Copy command: Now you're ready to install Pixie into your cluster. Step 8 of 10 Switch back to your terminal and paste the Helm command. Step 9 of 10 While you're installing Pixie, switch back to New Relic and click Continue to progress your guided installation to the final step. Here, you see a message that says \"Listening for data\": In a few minutes, Helm will have fully installed the necessary charts. You should see a message with the name, deployed date, namespace, and more: bash Copy NAME: newrelic-bundle LAST DEPLOYED: Thu Sep 23 13:50:24 2021 NAMESPACE: newrelic STATUS: deployed REVISION: 1 TEST SUITE: None Soon after that, the New Relic page updates to tell you that we're receiving data from your cluster: Step 10 of 10 Click Kubernetes cluster explorer to see your nodes, pods, deployments and a host of other data about your cluster, all in one view: Important You may see an error message, \"We're receiving incomplete data for this cluster.\" Please wait a few more minutes and refresh the page to see your cluster. In minutes, you were able to instrument your entire cluster without having to install language-specific agents or specify detailed cluster information! 
On top of all the data you see in the cluster explorer, click a pod or a node to dig deeper and see the granular data that the infrastructure agent was able to access near-instantly: lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. Now that you've instrumented your cluster, use Pixie to debug your application.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.33723, + "_score": 57.40007, "_version": null, "_explanation": null, "sort": null, @@ -15446,7 +15433,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.3273, + "_score": 152.22581, "_version": null, "_explanation": null, "sort": null, @@ -15489,7 +15476,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.40974, + "_score": 148.58963, "_version": null, "_explanation": null, "sort": null, @@ -15532,7 +15519,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.56938, + "_score": 147.81015, "_version": null, "_explanation": null, "sort": null, @@ -15575,7 +15562,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.55865, + "_score": 147.80179, "_version": null, "_explanation": null, "sort": null, @@ -15624,7 +15611,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 152.63321, + "_score": 143.86487, "_version": null, "_explanation": null, "sort": null, @@ -15666,7 +15653,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -15707,7 +15694,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -15748,7 +15735,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -15789,7 +15776,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -15830,7 +15817,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -15878,7 +15865,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.3273, + "_score": 152.22571, "_version": null, "_explanation": null, "sort": null, @@ -15921,7 +15908,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.40974, + "_score": 148.58951, "_version": null, "_explanation": null, "sort": null, @@ -15964,7 +15951,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.56938, + "_score": 147.81004, "_version": null, "_explanation": null, "sort": null, @@ -16007,7 +15994,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.55865, + "_score": 147.80167, "_version": null, "_explanation": null, "sort": null, @@ -16056,7 +16043,7 @@ "info": "", "_index": 
"520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 152.63321, + "_score": 143.86478, "_version": null, "_explanation": null, "sort": null, @@ -16097,7 +16084,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -16137,7 +16124,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -16177,7 +16164,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -16217,7 +16204,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -16257,7 +16244,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -16302,7 +16289,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.02142, + "_score": 147.45116, "_version": null, "_explanation": null, "sort": null, @@ -16345,7 +16332,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 154.99384, + "_score": 144.44461, "_version": null, "_explanation": null, "sort": null, @@ -16357,47 +16344,6 @@ }, "id": "617da50164441f41c4fbf2ea" }, - { - "sections": [ - "AWS Elemental MediaConvert monitoring integration", - "Important", - "Activate the integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Elemental MediaConvert Queue data", - "Elemental MediaConvert Operation data" - ], - "title": "AWS Elemental MediaConvert monitoring integration", - "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": "4e3af1ef7b8a79842f689fde5561e79fa9acfbb0", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elemental-mediaconvert-monitoring-integration/", - "published_at": "2022-02-14T11:37:19Z", - "updated_at": "2022-02-14T11:37:19Z", - "document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic includes an integration for reporting your AWS Elemental MediaConvert data to our platform. Here we explain how to activate the integration and what data it collects. Activate the integration To enable this integration follow standard procedures to Connect AWS services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the AWS Elemental MediaConvert integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data in Infrastructure, go to one.newrelic.com > Infrastructure > AWS and select an integration. 
Data is attached to the following event type: Entity Event Type Provider Queue AwsMediaConvertQueueSample AwsMediaConvertQueue Operation AwsMediaConvertOperationSample AwsMediaConvertOperation For more on how to use your data, see Understand and use integration data. Metric data This integration collects AWS Elemental MediaConvert data for Queue and Operation. Elemental MediaConvert Queue data Metric Unit Description audioOutputDuration Seconds The number of seconds of audio-only output for a queue. sDOutputDuration Seconds The number of seconds of standard definition (SD) output for a queue. hDOutputDuration Seconds The number of seconds of high-definition (HD) output for a queue. uHDOutputDuration Seconds The number of seconds of ultra-high-definition (UHD) output for a queue. 8KOutputDuration Seconds The number of seconds of 8K output for a queue. jobsCompletedCount Count The number of jobs completed in this queue. jobsErroredCount Count The number of jobs that failed because of invalid inputs, such as a request to transcode a file that is not in the specified input bucket. standbyTime Seconds The number of seconds before AWS Elemental MediaConvert starts transcoding a job. transcodingTime Seconds The number of seconds for AWS Elemental MediaConvert to complete transcoding. Elemental MediaConvert Operation data Metric Unit Description errors Count errors", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 147.11035, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "AWS Elemental MediaConvert monitoring integration", - "sections": "AWS Elemental MediaConvert monitoring integration", - "tags": "AWS integrations list", - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic includes an integration for reporting your AWS Elemental MediaConvert data" - }, - "id": "617d6cbe64441f6988fbd4e7" - }, { "sections": [ "AWS Kinesis Data Streams", @@ -16425,7 +16371,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -16465,7 +16411,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -16477,6 +16423,46 @@ "body": "What's included AWS Health installation docs Monitor AWS Health by connecting AWS to New Relic. Doc What is AWS Health? Oversight and alerts for how AWS outages and maintenance may affect your services. Get started! 
Start monitoring AWS Health by connecting Amazon Web Services (AWS) to New Relic" }, "id": "61566912196a679a39b70d4f" + }, + { + "sections": [ + "AWS EMR", + "What's included", + "AWS EMR installation docs", + "What is AWS EMR?", + "Get started!", + "More info" + ], + "title": "AWS EMR", + "type": "quickstarts", + "tags": [ + "aws", + "amazon web services" + ], + "quick_start_name": "AWS EMR", + "external_id": "239ca030e9a6e12b688167c0a2c68c35a9f081ca", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/aws-emr/d84051ba-365f-4542-8db9-0829384ea55a/", + "published_at": "2022-02-07T01:45:29Z", + "updated_at": "2021-10-06T14:00:38Z", + "document_type": "page", + "popularity": 1, + "body": "What's included AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Doc What is AWS EMR? Process and manage big data inputs from popular frameworks. Get started! Start monitoring AWS EMR by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS EMR documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS EMR. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS EMR observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 141.20995, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "AWS EMR", + "sections": "AWS EMR", + "tags": "amazon web services", + "quick_start_name": "AWS EMR", + "body": "What's included AWS EMR installation docs Monitor AWS EMR by connecting AWS to New Relic. Doc What is AWS EMR? Process and manage big data inputs from popular frameworks. Get started! Start monitoring AWS EMR by connecting Amazon Web Services (AWS) to New Relic! 
Check out our AWS EMR documentation" + }, + "id": "61566b7b64441f603909962d" } ], "/aws-sns/651e8b9f-955b-49b1-bd27-0db7623a2ca1": [ @@ -16507,7 +16493,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -16547,7 +16533,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -16587,7 +16573,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20995, "_version": null, "_explanation": null, "sort": null, @@ -16627,7 +16613,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.2099, "_version": null, "_explanation": null, "sort": null, @@ -16667,7 +16653,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.2099, "_version": null, "_explanation": null, "sort": null, @@ -16723,7 +16709,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 260.51086, + "_score": 242.79662, "_version": null, "_explanation": null, "sort": null, @@ -16759,7 +16745,7 @@ "external_id": "509f5fb8490b652df4c6f31ec06b403c3393530e", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elb-classic-monitoring-integration/", - "published_at": "2022-02-14T11:39:18Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-14T11:39:18Z", "document_type": "page", "popularity": 1, @@ -16767,7 +16753,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 249.37799, + "_score": 234.60384, "_version": null, "_explanation": null, "sort": null, @@ -16779,53 +16765,6 @@ }, "id": "617d6cbe28ccbc11ea800cd1" }, - { - "sections": [ - "Azure Load Balancer monitoring integration", - "Features", - "Requirements", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Inventory data", - "EOL NOTICE", - "azure/loadbalancer", - "azure/loadbalancer/backend-pool", - "azure/loadbalancer/frontend-ip", - "azure/loadbalancer/inbound-nat-rule", - "azure/loadbalancer/probe", - "azure/loadbalancer/rule" - ], - "title": "Azure Load Balancer monitoring integration", - "type": "docs", - "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" - ], - "external_id": "65ea6f2bbe5d2d402f4cd4021988b40e09497718", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-load-balancer-monitoring-integration/", - "published_at": "2022-02-15T19:39:01Z", - "updated_at": "2022-02-15T19:39:00Z", - "document_type": "page", - "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure Load Balancer that reports data from your Azure Load Balancer service to New Relic. This document explains how to activate this integration and describes the data that can be captured. Features New Relic's integration for Azure Load Balancer reports metric data about TCP and UDP load balancers that distribute traffic among instances of services defined in a load-balanced set. 
Metric data include virtual IP and dynamic IP availability, and processed byte and packet counts. It also collects data about the status and configuration of the service. Using New Relic, you can: View Azure Load Balancer data in pre-built Infrastructure dashboards. Run custom queries and visualize the data. Create alert conditions to notify you of changes in data. Requirements Load Balancer metrics are collected only if you use the Microsoft Load Balancer Standard SKU. Activate integration To enable the integration: Make sure you are using the Microsoft Load Balancer Standard SKU. Follow standard procedures to activate your Azure service in New Relic Infrastructure. Configuration and polling You can change the polling frequency and filter data using configuration options. New Relic queries your Azure Load Balancer services according to a default polling interval, which varies depending on the integration. Polling frequency for the Azure Load Balancer integration: Polling interval: 5 minutes Resolution: 1 minute Find and use data To explore your integration data, go to one.newrelic.com > Infrastructure > Azure > (select an integration). You can query and explore your data using the AzureLoadBalancerSample event type, with a provider value of AzureLoadBalancer. Metric data To view metrics reported by the Azure Load Balancer integration, query the AzureLoadBalancerSample event type. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. Metric Description Metadata vipAvailability Average Load Balancer data path availability per time duration. frontendIPAddress frontendPort dipAvailability Average Load Balancer health probe status per time duration. frontendIPAddress frontendPort backendIPAddress backendPort protocolType byteCount Total number of Bytes transmitted within time period. frontendIPAddress frontendPort direction packetCount Total number of Packets transmitted within time period. frontendIPAddress frontendPort direction synCount Total number of SYN Packets transmitted within time period. frontendIPAddress frontendPort direction snatConnectionCount Total number of new SNAT connections, that is, outbound connections created within time period. frontendIPAddress backendIPAddress connectionState allocatedSnatPorts Total number of SNAT ports allocated within time period. frontendIPAddress backendIPAddress protocolType usedSnatPorts Total number of SNAT ports used within time period. frontendIPAddress backendIPAddress protocolType Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. This integration collects the following inventory data about your system's state and configuration. 
azure/loadbalancer name regionName resourceGroupName sku type azure/loadbalancer/backend-pool name regionName resourceGroupName virtualMachineIds virtualMachineNames azure/loadbalancer/frontend-ip ipAddress ipAllocationMethod isPublicIp name regionName resourceGroupName azure/loadbalancer/inbound-nat-rule backendPort floatingIpEnabled frontendPort name protocol regionName resourceGroupName timeOutInMinutes azure/loadbalancer/probe intervalInSeconds name numberOfProbes port protocol regionName resourceGroupName requestPath azure/loadbalancer/rule backendPort frontendPort idleTimeoutInMinutes isFloatingIp name protocol regionName resourceGroupName", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 203.65631, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Azure Load Balancer monitoring integration", - "sections": "Azure Load Balancer monitoring integration", - "body": " for Azure Load Balancer reports metric data about TCP and UDP load balancers that distribute traffic among instances of services defined in a load-balanced set. Metric data include virtual IP and dynamic IP availability, and processed byte and packet counts. It also collects data about the status" - }, - "id": "617d72cf28ccbc08677fea43" - }, { "sections": [ "AWS VPC", @@ -16854,7 +16793,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -16895,7 +16834,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -16907,6 +16846,47 @@ "body": "What's included AWS TransitGateway installation docs Monitor AWS TransitGateway by connecting AWS to New Relic. Doc What is AWS TransitGateway? Collate VPC and on-premises resources through a central gateway. Get started! Start monitoring AWS TransitGateway by connecting Amazon Web Services (AWS" }, "id": "6156694b28ccbca493f21461" + }, + { + "sections": [ + "AWS Direct Connect", + "What's included", + "AWS Direct Connect installation docs", + "What is AWS Direct Connect?", + "Get started!", + "More info" + ], + "title": "AWS Direct Connect", + "type": "quickstarts", + "tags": [ + "aws", + "amazon web services", + "networking" + ], + "quick_start_name": "AWS Direct Connect", + "external_id": "376f4f6cca278b6a27ab85315fad1ebc6ae76d4b", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/aws-direct-connect/913f6003-d559-4538-a6b2-9c19a7cc7d46/", + "published_at": "2022-02-04T02:04:55Z", + "updated_at": "2021-10-05T01:44:24Z", + "document_type": "page", + "popularity": 1, + "body": "What's included AWS Direct Connect installation docs Monitor AWS Direct Connect by connecting AWS to New Relic. Doc What is AWS Direct Connect? Establish a private, secure connection to AWS that runs outside of your ISP. Get started! Start monitoring AWS Direct Connect by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Direct Connect documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Direct Connect. This quickstart doesn't include any dashboards . Do you think it should? 
You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Direct Connect observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Direct Connect installation docs Monitor AWS Direct Connect by connecting AWS to New Relic. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 189.49841, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "AWS Direct Connect", + "sections": "AWS Direct Connect", + "tags": "amazon web services", + "quick_start_name": "AWS Direct Connect", + "body": "What's included AWS Direct Connect installation docs Monitor AWS Direct Connect by connecting AWS to New Relic. Doc What is AWS Direct Connect? Establish a private, secure connection to AWS that runs outside of your ISP. Get started! Start monitoring AWS Direct Connect by connecting Amazon Web" + }, + "id": "61566b7a64441f5bfa09964c" } ], "/aws-elasticache/5c1e7b31-df21-4acb-b72c-0a8786b88301": [ @@ -16938,7 +16918,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -16979,7 +16959,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -17020,7 +17000,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -17061,7 +17041,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -17102,7 +17082,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -17148,7 +17128,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61313, "_version": null, "_explanation": null, "sort": null, @@ -17197,7 +17177,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.2801, "_version": null, "_explanation": null, "sort": null, @@ -17245,7 +17225,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27109, "_version": null, "_explanation": null, "sort": null, @@ -17287,7 +17267,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.26717, "_version": null, "_explanation": null, "sort": null, @@ -17330,7 +17310,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24292, "_version": null, "_explanation": null, "sort": null, @@ -17375,7 +17355,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61319, "_version": null, "_explanation": null, 
"sort": null, @@ -17424,7 +17404,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.28016, "_version": null, "_explanation": null, "sort": null, @@ -17472,7 +17452,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27115, "_version": null, "_explanation": null, "sort": null, @@ -17514,7 +17494,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.267235, "_version": null, "_explanation": null, "sort": null, @@ -17557,7 +17537,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24298, "_version": null, "_explanation": null, "sort": null, @@ -17610,7 +17590,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 199.69415, + "_score": 189.74747, "_version": null, "_explanation": null, "sort": null, @@ -17651,7 +17631,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.07266, + "_score": 184.11131, "_version": null, "_explanation": null, "sort": null, @@ -17689,7 +17669,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 190.96558, + "_score": 181.21945, "_version": null, "_explanation": null, "sort": null, @@ -17736,7 +17716,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 184.98126, + "_score": 175.79797, "_version": null, "_explanation": null, "sort": null, @@ -17792,7 +17772,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 180.33208, + "_score": 169.27922, "_version": null, "_explanation": null, "sort": null, @@ -17839,7 +17819,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.32745, + "_score": 152.22581, "_version": null, "_explanation": null, "sort": null, @@ -17882,7 +17862,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.40988, + "_score": 148.58963, "_version": null, "_explanation": null, "sort": null, @@ -17925,7 +17905,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.56952, + "_score": 147.81015, "_version": null, "_explanation": null, "sort": null, @@ -17968,7 +17948,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.55879, + "_score": 147.80179, "_version": null, "_explanation": null, "sort": null, @@ -18017,7 +17997,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 152.63333, + "_score": 143.86487, "_version": null, "_explanation": null, "sort": null, @@ -18038,31 +18018,31 @@ "Automate workflows", "Guides to automate workflows", "Quickly tag resources", - "Set up New Relic using the Kubernetes operator", "Automate common tasks", + "Set up New Relic using the Kubernetes operator", "Automatically tag a simple \"Hello World\" Demo across the entire stack", "Getting started with New Relic and Terraform", "Set up New Relic using Helm charts" ], - "published_at": "2022-02-15T01:37:23Z", + "published_at": "2022-02-16T01:38:14Z", "title": "Automate workflows", - "updated_at": "2022-02-15T01:37:23Z", + "updated_at": "2022-02-16T01:38:14Z", 
"type": "developer", "external_id": "d4f408f077ed950dc359ad44829e9cfbd2ca4871", "document_type": "page", "popularity": 1, - "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", + "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.14711, + "_score": 94.86789, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "Automate workflows", "sections": "Automate workflows", - "body": " deployment markers 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" + "body": " the Kubernetes operator 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 
30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" }, "id": "6091f7c8e7b9d2f6715068f1" }, @@ -18087,7 +18067,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.15608, + "_score": 94.278885, "_version": null, "_explanation": null, "sort": null, @@ -18133,7 +18113,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.503174, + "_score": 81.99892, "_version": null, "_explanation": null, "sort": null, @@ -18167,7 +18147,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.496025, + "_score": 81.99311, "_version": null, "_explanation": null, "sort": null, @@ -18212,7 +18192,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.49205, + "_score": 81.98988, "_version": null, "_explanation": null, "sort": null, @@ -18251,7 +18231,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -18292,7 +18272,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.2279, + "_score": 177.22371, "_version": null, "_explanation": null, "sort": null, @@ -18333,7 +18313,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22784, + "_score": 177.22365, "_version": null, "_explanation": null, "sort": null, @@ -18374,7 +18354,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.22777, + "_score": 177.22359, "_version": null, "_explanation": null, "sort": null, @@ -18415,7 +18395,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.09012, + "_score": 177.0878, "_version": null, "_explanation": null, "sort": null, @@ -18456,7 +18436,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.46041, + "_score": 86.35066, "_version": null, "_explanation": null, "sort": null, @@ -18499,7 +18479,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06972, + "_score": 76.61224, "_version": null, "_explanation": null, "sort": null, @@ -18548,7 +18528,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.6435, + "_score": 76.27921, "_version": null, "_explanation": null, "sort": null, @@ -18596,7 +18576,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63199, + "_score": 76.2702, "_version": null, "_explanation": null, "sort": null, @@ -18638,7 +18618,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.626976, + "_score": 76.26629, "_version": null, "_explanation": null, "sort": null, @@ -18680,7 +18660,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -18721,7 +18701,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, 
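Editor's note, not part of the indexed page data: the "Automate workflows" entry above is about scripting New Relic resources (tagging apps, deployment markers) instead of clicking through the UI. As an illustration only, here is a minimal Python sketch of programmatic tagging through NerdGraph, New Relic's GraphQL API. The entity GUID, tag values, and the NR_API_KEY environment variable are placeholders, and the taggingAddTagsToEntity mutation should be verified against the current NerdGraph schema.

```python
# Hypothetical sketch: add a tag to a New Relic entity via NerdGraph.
import os
import requests

NERDGRAPH_URL = "https://api.newrelic.com/graphql"

mutation = """
mutation($guid: EntityGuid!, $tags: [TaggingTagInput!]!) {
  taggingAddTagsToEntity(guid: $guid, tags: $tags) {
    errors { message }
  }
}
"""

variables = {
    "guid": "YOUR_ENTITY_GUID",                       # placeholder entity GUID
    "tags": [{"key": "team", "values": ["devops"]}],  # example tag
}

resp = requests.post(
    NERDGRAPH_URL,
    json={"query": mutation, "variables": variables},
    headers={"API-Key": os.environ["NR_API_KEY"]},    # a New Relic user API key
    timeout=30,
)
resp.raise_for_status()
print(resp.json())
```

The New Relic CLI covers the same tagging workflow interactively; the API route shown here is the kind of thing a CI job would use.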
"_version": null, "_explanation": null, "sort": null, @@ -18762,7 +18742,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -18803,7 +18783,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -18843,7 +18823,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -18887,7 +18867,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 120.249985, + "_score": 113.491585, "_version": null, "_explanation": null, "sort": null, @@ -18936,7 +18916,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.977295, + "_score": 80.72904, "_version": null, "_explanation": null, "sort": null, @@ -18977,7 +18957,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61218, "_version": null, "_explanation": null, "sort": null, @@ -19026,7 +19006,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.27916, "_version": null, "_explanation": null, "sort": null, @@ -19074,7 +19054,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27015, "_version": null, "_explanation": null, "sort": null, @@ -19121,7 +19101,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.3273, + "_score": 152.22581, "_version": null, "_explanation": null, "sort": null, @@ -19164,7 +19144,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.40974, + "_score": 148.58963, "_version": null, "_explanation": null, "sort": null, @@ -19207,7 +19187,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.56938, + "_score": 147.81015, "_version": null, "_explanation": null, "sort": null, @@ -19250,7 +19230,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.55865, + "_score": 147.80179, "_version": null, "_explanation": null, "sort": null, @@ -19299,7 +19279,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 152.63321, + "_score": 143.86487, "_version": null, "_explanation": null, "sort": null, @@ -19337,7 +19317,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1.4572625, + "_score": 1.489017, "_version": null, "_explanation": null, "sort": null, @@ -19376,7 +19356,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -19441,7 +19421,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.09785, + "_score": 111.344894, "_version": null, "_explanation": null, "sort": null, @@ -19470,7 +19450,7 @@ "external_id": "1f13326e09d6da78f08f645bc069c22342fbac6c", "image": "", "url": 
"https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/config-management-tools/configure-infrastructure-agent-using-ansible/", - "published_at": "2022-02-14T09:31:37Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-04T11:22:48Z", "document_type": "page", "popularity": 1, @@ -19478,7 +19458,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.76188, + "_score": 107.75284, "_version": null, "_explanation": null, "sort": null, @@ -19517,7 +19497,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 106.565414, + "_score": 99.142426, "_version": null, "_explanation": null, "sort": null, @@ -19529,44 +19509,38 @@ }, { "sections": [ - "Amazon RDS Enhanced Monitoring integration", - "Important", - "Features", - "Enable enhanced monitoring", - "Stream logs to Lambda function", - "Configuration and polling", - "Find and use data", - "Metric data", - "Metric data for all DB engines (except MS SQL Server) [#DB metrics]", - "Metric data for MS SQL", - "Definitions" + "OS versions page", + "Viewing the OS versions page", + "Viewing drill-down details" ], - "title": "Amazon RDS Enhanced Monitoring integration", + "title": "OS versions page", "type": "docs", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "Mobile app pages", + "Mobile monitoring UI", + "Mobile monitoring" ], - "external_id": "b8fc293ef2181c19a5e816449b9a320e44e13ab3", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-rds-enhanced-monitoring-integration/", - "published_at": "2022-02-15T20:48:24Z", - "updated_at": "2022-02-15T20:48:24Z", + "external_id": "370b6f1584d001a17f414066097692b9189e1a50", + "image": "https://docs.newrelic.com/static/8d84abf966c2f4b75ca298b362995c0e/c1b63/os-version-pic_0.png", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/mobile-monitoring-ui/mobile-app-pages/os-versions-page/", + "published_at": "2022-02-14T10:34:40Z", + "updated_at": "2021-07-09T11:46:41Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for collecting enhanced Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor the DB processes and identify potential trouble spots as well as to profile the DB allowing you to improve and optimize their response and cost. Enable enhanced monitoring Important Enabling this integration will incur some additional charges to your Amazon CloudWatch account. In addition there are some limitations and CPU metric data collection differences that are explained in Amazon's enhanced monitoring documentation. You must first have the New Relic Amazon RDS monitoring integration enabled before enabling RDS Enhanced Monitoring. Be sure that you have completed the steps in Connect AWS services to New Relic. 
New Relic uses AWS Lambda to collect RDS logs and provide near real-time data about your RDS instances; in AWS, this capability is called RDS Enhanced Monitoring. Follow these steps to enable the RDS Enhanced Monitoring integration: Specify the RDS instances that need RDS Enhanced Monitoring enabled. You can specify this when creating or modifying the instance: under Monitoring, set Enable Enhanced Monitoring to Yes. We recommend setting the data Granularity to 15 seconds. After enhanced monitoring is enabled, a stream called RDSOSMetrics is created in AWS CloudWatch Logs. Enhanced monitoring metrics are available via this stream. Create a Lambda function and subscribe it to that stream in the following steps to obtain the data. Create a new AWS Lambda function from the Serverless Repository: Go to Lambda > Create Function > Browse serverless App repository, check the box for Show apps that create custom IAM roles or resource policies, and search for NewRelic-log-ingestion. Populate the LICENSE_KEY environment variable with your New Relic account license key. Select Deploy to create a new CloudFormation stack, a new function called newrelic-log-ingestion, and the required role. Make sure that the NewRelic-log-ingestion function execution role has attached the arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess policy, giving it the appropriate permissions to read CloudWatch Logs. Go to the newrelic-log-ingestion function. Continue with the procedure to stream logs to the Lambda function. Stream logs to Lambda function To link the RDSOSMetrics log stream to the Lambda function (JSON format): From AWS Console > CloudWatch > Logs, select RDSOSMetrics log group, and apply Actions > Create Lambda subscription filter. For the Lambda function, select newrelic-log-ingestion. From the Log Format dropdown, select JSON as the Log format. At the bottom, click the Start streaming button to save your Lambda subscription filter. Once completed, the Lambda function will send all the log lines from RDSOSMetrics to New Relic's ingest services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon RDS Enhanced Monitoring integration: New Relic polling interval: 30 seconds on average (collected via CloudWatch Logs) Configurable when setting up AWS Lambda Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the RDS > Enhanced monitoring dashboard link. You can query and explore your data using the DatastoreSample event type, with a provider value of RdsDbInstance . For more on how to use your data, see Understand and use integration data. Metric data New Relic collects the following enhanced RDS data: Metric data for all DB engines (except MS SQL Server) [#DB metrics] Group Metrics Description General engine The database engine for the DB instance. instanceId The DB instance identifier. instanceResourceId A region-unique, immutable identifier for the DB instance, also used as the log stream identifier. numVCpus The number of virtual CPUs for the DB instance. timestamp The time at which the metrics were taken. uptime The amount of time that the DB instance has been active. version The version of the OS metrics' stream JSON format. cpuUtilization guest The percentage of CPU in use by guest programs. idle The percentage of CPU that is idle. irq The percentage of CPU in use by software interrupts.
nice The percentage of CPU in use by programs running at lowest priority. steal The percentage of CPU in use by other virtual machines. system The percentage of CPU in use by the kernel. total The total percentage of the CPU in use. This value excludes the nice value. user The percentage of CPU in use by user programs. wait The percentage of CPU unused while waiting for I/O access. diskIO (not available for Amazon Aurora) avgQueueLen The number of requests waiting in the I/O device's queue. avgReqSz The average request size, in kilobytes. await The number of milliseconds required to respond to requests, including queue time and service time. device The identifier of the disk device in use. readIOsPS The number of read operations per second. readKb The total number of kilobytes read. readKbPS The number of kilobytes read per second. rrqmPS The number of merged read requests queued per second. tps The number of I/O transactions per second. util The percentage of CPU time during which requests were issued. writeIOsPS The number of write operations per second. writeKb The total number of kilobytes written. writeKbPS The number of kilobytes written per second. wrqmPS The number of merged write requests queued per second. fileSys maxFiles The maximum number of files that can be created for the file system. total The total amount of disk space available for the file system, in kilobytes. used The amount of disk space used by files in the file system, in kilobytes. usedFilePercent The percentage of available files in use. usedFiles The number of files in the file system. usedPercent The percentage of the file-system disk space in use. loadAverageMinute fifteen The number of processes requesting CPU time over the last 15 minutes. five The number of processes requesting CPU time over the last 5 minutes. one The number of processes requesting CPU time over the last minute. memory active The amount of assigned memory, in kilobytes. buffers The amount of memory used for buffering I/O requests prior to writing to the storage device, in kilobytes. cached The amount of memory used for caching file system–based I/O. dirty The amount of memory pages in RAM that have been modified but not written to their related data block in storage, in kilobytes. free The amount of unassigned memory, in kilobytes. hugePagesFree The number of free huge pages. Huge pages are a feature of the Linux kernel. hugePagesRsvd The number of committed huge pages. hugePagesSize The size for each huge pages unit, in kilobytes. hugePagesSurp The number of available surplus huge pages over the total. hugePagesTotal The total number of huge pages for the system. inactive The amount of least-frequently used memory pages, in kilobytes. mapped The total amount of file-system contents that is memory mapped inside a process address space, in kilobytes. pageTables The amount of memory used by page tables, in kilobytes. slab The amount of reusable kernel data structures, in kilobytes. total The total amount of memory, in kilobytes. writeback The amount of dirty pages in RAM that are still being written to the backing storage, in kilobytes. network rx The number of bytes received per second. tx The number of bytes uploaded per second. process cpuUsedPc The percentage of CPU used by the process. rss The amount of RAM allocated to the process, in kilobytes. memoryUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. swap cached The amount of swap memory, in kilobytes, used as cache memory. free The total amount of swap memory free, in kilobytes.
total The total amount of swap memory available, in kilobytes. tasks blocked The number of tasks that are blocked. running The number of tasks that are running. sleeping The number of tasks that are sleeping. stopped The number of tasks that are stopped. total The total number of tasks. zombie The number of child tasks that are inactive with an active parent task. Metric data for MS SQL Group Metrics Description disks totalKb The total space of the disk, in kilobytes. usedKb The amount of space used on the disk, in kilobytes. usedPc The percentage of space used on the disk. availKb The space available on the disk, in kilobytes. availPc The percentage of space available on the disk. rdCountPS The number of read operations per second. rdBytesPS The number of bytes read per second. wrCountPS The number of write operations per second. wBytesPS The number of bytes written per second. memory commitToKb The amount of pagefile-backed virtual address space in use, that is, the current commit charge. This value is composed of main memory (RAM) and disk (pagefiles). commitLimitKb The maximum possible value for the commitTotKb metric. This value is the sum of the current pagefile size plus the physical memory available for pageable contents–excluding RAM that is assigned to non-pageable areas. commitPeakKb The largest value of the commitTotKb metric since the operating system was last started. kernTotKb The sum of the memory in the paged and non-paged kernel pools, in kilobytes. kernPagedKb The amount of memory in the paged kernel pool, in kilobytes. kernNonpagedKb The amount of memory in the non-paged kernel pool, in kilobytes. pageSize The size of a page, in bytes. physTotKb The amount of physical memory, in kilobytes. physAvailKb The amount of available physical memory, in kilobytes. sqlServerTotKb The amount of memory committed to Microsoft SQL Server, in kilobytes. sysCacheKb The amount of system cache memory, in kilobytes. network rdBytesPS The number of bytes received per second. wrBytesPS The number of bytes sent per second. process cpuUsedPc The percentage of CPU used by the process. memUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. workingSetKb The amount of memory in the private working set plus the amount of memory that is in use by the process and can be shared with other processes, in kilobytes. workingSetPrivKb The amount of memory that is in use by a process, but can't be shared with other processes, in kilobytes. workingSetShareableKb The amount of memory that is in use by a process and can be shared with other processes, in kilobytes. virtKb The amount of virtual address space the process is using, in kilobytes. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. system handles The number of handles that the system is using. processes The number of processes running on the system. threads The number of threads running on the system. Definitions Term Description Event type DatastoreSample Provider RdsDbInstance Processes Enhanced Monitoring allows you to monitor the following processes associated with your RDS instances: RDS Process: Shows a summary of the resources used by the RDS management agent, diagnostics monitoring processes, and other AWS processes that are required to support RDS DB instances.
RDS Child Process: Nested under RDS Processes, shows a summary of the RDS processes that support the DB instance, for example aurora for Amazon Aurora DB clusters and mysqld for MySQL DB instances. OS Processes: Shows a summary of the kernel and system processes, which generally have minimal impact on performance.", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill down into details by a major or minor OS version (for example, iOS 8, iOS 7.1.1, Android 4.2.2). Viewing the OS versions page one.newrelic.com > Mobile > (select an app) > App > OS versions: Use this page to view, sort, or drill down into detailed information about the top five types of operating system versions using your mobile app. To view performance details about the operating system versions for your mobile app users: Go to one.newrelic.com > Mobile > (select an app) > App > OS versions. To select the mobile app versions or time period, use the Versions menu and time picker below the UI menu bar. Optional: Select the Sort by and Hide < 1% throughput options. To expand or collapse the list of operating systems to include versions, select the operating system's name (for example, iOS 7). Viewing drill-down details To drill down into detailed information, use any of our standard user interface functions and page functions. In addition: To view details for the minor and point releases of a major OS version (including interaction time, HTTP request times, network failures, active devices, and slowest traces or all subversions), select a major OS version from the list. To view details for a specific OS version, select its name from the expanded OS list. To view trace details for a slow transaction (if available), select its link. For more information, see Interactions page. To return to the main OS versions page, select the Close (X) button.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.13599, + "_score": 95.48022, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor" + "title": "OS versions page", + "sections": "OS versions page", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android.
Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill" }, - "id": "617d6d5d64441fb952fbcb5d" + "id": "603eaee9e7b9d260112a0809" } ], "/azure-virtual-network/f812a124-5443-4451-94d3-32b54c109555": [ @@ -19599,7 +19573,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 120.249985, + "_score": 113.491585, "_version": null, "_explanation": null, "sort": null, @@ -19648,7 +19622,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.977295, + "_score": 80.72904, "_version": null, "_explanation": null, "sort": null, @@ -19689,7 +19663,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61218, "_version": null, "_explanation": null, "sort": null, @@ -19738,7 +19712,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.27916, "_version": null, "_explanation": null, "sort": null, @@ -19786,7 +19760,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27015, "_version": null, "_explanation": null, "sort": null, @@ -19831,7 +19805,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61319, "_version": null, "_explanation": null, "sort": null, @@ -19880,7 +19854,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.28016, "_version": null, "_explanation": null, "sort": null, @@ -19928,7 +19902,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27115, "_version": null, "_explanation": null, "sort": null, @@ -19970,7 +19944,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.267235, "_version": null, "_explanation": null, "sort": null, @@ -20013,7 +19987,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24298, "_version": null, "_explanation": null, "sort": null, @@ -20054,7 +20028,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -20094,7 +20068,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -20134,7 +20108,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -20174,7 +20148,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -20214,7 +20188,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36096, + "_score": 141.20694, "_version": null, "_explanation": null, "sort": null, @@ -20256,7 +20230,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 
133.48024, + "_score": 133.40433, "_version": null, "_explanation": null, "sort": null, @@ -20295,7 +20269,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.4346, + "_score": 82.271996, "_version": null, "_explanation": null, "sort": null, @@ -20333,7 +20307,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 61.96212, + "_score": 57.83287, "_version": null, "_explanation": null, "sort": null, @@ -20368,7 +20342,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 50.458664, + "_score": 50.079136, "_version": null, "_explanation": null, "sort": null, @@ -20396,7 +20370,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 39.86526, + "_score": 39.84732, "_version": null, "_explanation": null, "sort": null, @@ -20432,7 +20406,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 35.76979, + "_score": 35.053425, "_version": null, "_explanation": null, "sort": null, @@ -20469,7 +20443,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -20534,7 +20508,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.09776, + "_score": 111.34482, "_version": null, "_explanation": null, "sort": null, @@ -20563,7 +20537,7 @@ "external_id": "1f13326e09d6da78f08f645bc069c22342fbac6c", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/config-management-tools/configure-infrastructure-agent-using-ansible/", - "published_at": "2022-02-14T09:31:37Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-04T11:22:48Z", "document_type": "page", "popularity": 1, @@ -20571,7 +20545,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.761856, + "_score": 107.752815, "_version": null, "_explanation": null, "sort": null, @@ -20610,7 +20584,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 106.56532, + "_score": 99.14235, "_version": null, "_explanation": null, "sort": null, @@ -20622,44 +20596,38 @@ }, { "sections": [ - "Amazon RDS Enhanced Monitoring integration", - "Important", - "Features", - "Enable enhanced monitoring", - "Stream logs to Lambda function", - "Configuration and polling", - "Find and use data", - "Metric data", - "Metric data for all DB engines (except MS SQL Server) [#DB metrics]", - "Metric data for MS SQL", - "Definitions" + "OS versions page", + "Viewing the OS versions page", + "Viewing drill-down details" ], - "title": "Amazon RDS Enhanced Monitoring integration", + "title": "OS versions page", "type": "docs", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "Mobile app pages", + "Mobile monitoring UI", + "Mobile monitoring" ], - "external_id": "b8fc293ef2181c19a5e816449b9a320e44e13ab3", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-rds-enhanced-monitoring-integration/", - "published_at": "2022-02-15T20:48:24Z", - "updated_at": "2022-02-15T20:48:24Z", + "external_id": "370b6f1584d001a17f414066097692b9189e1a50", + "image": 
"https://docs.newrelic.com/static/8d84abf966c2f4b75ca298b362995c0e/c1b63/os-version-pic_0.png", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/mobile-monitoring-ui/mobile-app-pages/os-versions-page/", + "published_at": "2022-02-14T10:34:40Z", + "updated_at": "2021-07-09T11:46:41Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for collecting enhanced Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor the DB processes and identify potential trouble spots as well as to profile the DB allowing you to improve and optimize their response and cost. Enable enhanced monitoring Important Enabling this integration will incur some additional charges to your Amazon CloudWatch account. In addition there are some limitations and CPU metric data collection differences that are explained in Amazon's enhanced monitoring documentation. You must first have the New Relic Amazon RDS monitoring integration enabled before enabling RDS Enhanced Monitoring. Be sure that you have completed the steps in Connect AWS services to New Relic. New Relic uses AWS Lambda in order to collect RDS logs and provide near-real time data of your RDS instances, this capability is called RDS Enhanced Monitoring in AWS. Follow these steps in order to enable RDS Enhanced Monitoring integration: Specify the RDS instances that need Enable RDS Enhanced Monitoring. You can specify this when creating or modifying the instance: under Monitoring, set Enable Enhanced Monitoring to Yes. We recommend setting the data Granularity to 15 seconds. After enhanced monitoring is enabled, a stream called RDSOSMetrics is created in AWS CloudWatch Logs. Enhanced monitoring metrics are available via this stream. Create a lambda function and subscribe it to that stream in the following steps to obtain the data. Create a new AWS Lambda function from the Serverless Repository: Go to Lambda > Create Function > Browse serverless App repository, check the box for Show apps that create custom IAM roles or resource policies, and search for NewRelic-log-ingestion. Populate the LICENSE_KEY environment variable with your New Relic account license key. Select Deploy to create a new CloudFormation stack, a new function called newrelic-log-ingestion, and the required role. Make sure that the NewRelic-log-ingestion function execution role has attached the arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess policy, giving it the appropriate permissions to read CloudWatch Logs. Go to the newrelic-log-ingestion function. Continue with the procedure to stream logs to the Lambda function. Stream logs to Lambda function To link the RDSOSMetrics log stream to the Lambda function (JSON format): From AWS Console > CloudWatch > Logs, select RDSOSMetrics log group, and apply Actions > Create Lambda subscription filter. For the Lambda function, select newrelic-log-ingestion. From the Log Format dropdown, select JSON as the Log format. At the bottom, click the Start streaming button to save your Lambda subscription filter. 
Once completed, the Lambda function will send all the log lines from RDSOSMetrics to New Relic's ingest services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon RDS Enhanced Monitoring integration: New Relic polling interval: 30 seconds on average (collected via CloudWatch Logs) Configurable when setting up AWS Lambda Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the RDS > Enhanced monitoring dashboard link. You can query and explore your data using the DatastoreSample event type, with a provider value of RdsDbInstance . For more on how to use your data, see Understand and use integration data. Metric data New Relic collects the following enhanced RDS data: Metric data for all DB engines (except MS SQL Server) [#DB metrics] Group Metrics Description General engine The database engine for the DB instance. instanceId The DB instance identifier. instanceResourceId A region-unique, immutable identifier for the DB instance, also used as the log stream identifier. numVCpus The number of virtual CPUs for the DB instance. timestamp The time at which the metrics were taken. uptime The amount of time that the DB instance has been active. version The version of the OS metrics' stream JSON format. cpuUtilization guest The percentage of CPU in use by guest programs. idle The percentage of CPU that is idle. irq The percentage of CPU in use by software interrupts. nice The percentage of CPU in use by programs running at lowest priority. steal The percentage of CPU in use by other virtual machines. system The percentage of CPU in use by the kernel. total The total percentage of the CPU in use. This value excludes the nice value. user The percentage of CPU in use by user programs. wait The percentage of CPU unused while waiting for I/O access. diskIO (not available for Amazon Aurora) avgQueueLen The number of requests waiting in the I/O device's queue. avgReqSz The average request size, in kilobytes. await The number of milliseconds required to respond to requests, including queue time and service time. device The identifier of the disk device in use. readIOsPS The number of read operations per second. readKb The total number of kilobytes read. readKbPS The number of kilobytes read per second. rrqmPS The number of merged read requests queued per second. tps The number of I/O transactions per second. util The percentage of CPU time during which requests were issued. writeIOsPS The number of write operations per second. writeKb The total number of kilobytes written. writeKbPS The number of kilobytes written per second. wrqmPS The number of merged write requests queued per second. fileSys maxFiles The maximum number of files that can be created for the file system. total The total amount of disk space available for the file system, in kilobytes. used The amount of disk space used by files in the file system, in kilobytes. usedFilePercent The percentage of available files in use. usedFiles The number of files in the file system. usedPercent The percentage of the file-system disk space in use. loadAverageMinute fifteen The number of processes requesting CPU time over the last 15 minutes. five The number of processes requesting CPU time over the last 5 minutes. one The number of processes requesting CPU time over the last minute. memory active The amount of assigned memory, in kilobytes.
buffers The amount of memory used for buffering I/O requests prior to writing to the storage device, in kilobytes. cached The amount of memory used for caching file system–based I/O. dirty The amount of memory pages in RAM that have been modified but not written to their related data block in storage, in kilobytes. free The amount of unassigned memory, in kilobytes. hugePagesFree The number of free huge pages. Huge pages are a feature of the Linux kernel. hugePagesRsvd The number of committed huge pages. hugePagesSize The size for each huge pages unit, in kilobytes. hugePagesSurp The number of available surplus huge pages over the total. hugePagesTotal The total number of huge pages for the system. inactive The amount of least-frequently used memory pages, in kilobytes. mapped The total amount of file-system contents that is memory mapped inside a process address space, in kilobytes. pageTables The amount of memory used by page tables, in kilobytes. slab The amount of reusable kernel data structures, in kilobytes. total The total amount of memory, in kilobytes. writeback The amount of dirty pages in RAM that are still being written to the backing storage, in kilobytes. network rx The number of bytes received per second. tx The number of bytes uploaded per second. process cpuUsedPc The percentage of CPU used by the process. rss The amount of RAM allocated to the process, in kilobytes. memoryUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. swap cached The amount of swap memory, in kilobytes, used as cache memory. free The total amount of swap memory free, in kilobytes. total The total amount of swap memory available, in kilobytes. tasks blocked The number of tasks that are blocked. running The number of tasks that are running. sleeping The number of tasks that are sleeping. stopped The number of tasks that are stopped. total The total number of tasks. zombie The number of child tasks that are inactive with an active parent task. Metric data for MS SQL Group Metrics Description disks totalKb The total space of the disk, in kilobytes. usedKb The amount of space used on the disk, in kilobytes. usedPc The percentage of space used on the disk. availKb The space available on the disk, in kilobytes. availPc The percentage of space available on the disk. rdCountPS The number of read operations per second. rdBytesPS The number of bytes read per second. wrCountPS The number of write operations per second. wBytesPS The number of bytes written per second. memory commitTotKb The amount of pagefile-backed virtual address space in use, that is, the current commit charge. This value is composed of main memory (RAM) and disk (pagefiles). commitLimitKb The maximum possible value for the commitTotKb metric. This value is the sum of the current pagefile size plus the physical memory available for pageable contents, excluding RAM that is assigned to non-pageable areas. commitPeakKb The largest value of the commitTotKb metric since the operating system was last started. kernTotKb The sum of the memory in the paged and non-paged kernel pools, in kilobytes. kernPagedKb The amount of memory in the paged kernel pool, in kilobytes. kernNonpagedKb The amount of memory in the non-paged kernel pool, in kilobytes. pageSize The size of a page, in bytes. physTotKb The amount of physical memory, in kilobytes. physAvailKb The amount of available physical memory, in kilobytes. sqlServerTotKb The amount of memory committed to Microsoft SQL Server, in kilobytes. sysCacheKb The amount of system cache memory, in kilobytes. 
network rdBytesPS The number of bytes received per second. wrBytesPS The number of bytes sent per second. process cpuUsedPc The percentage of CPU used by the process. memUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. workingSetKb The amount of memory in the private working set plus the amount of memory that is in use by the process and can be shared with other processes, in kilobytes. workingSetPrivKb The amount of memory that is in use by a process, but can't be shared with other processes, in kilobytes. workingSetShareableKb The amount of memory that is in use by a process and can be shared with other processes, in kilobytes. virtKb The amount of virtual address space the process is using, in kilobytes. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. system handles The number of handles that the system is using. processes The number of processes running on the system. threads The number of threads running on the system. Definitions Term Description Event type DatastoreSample Provider RdsDbInstance Processes Enhanced Monitoring allows you to monitor the following processes associated with your RDS instances: RDS Process: Shows a summary of the resources used by the RDS management agent, diagnostics monitoring processes, and other AWS processes that are required to support RDS DB instances. RDS Child Process: Nested under RDS Processes, shows a summary of the RDS processes that support the DB instance, for example aurora for Amazon Aurora DB clusters and mysqld for MySQL DB instances. OS Processes: Shows a summary of the kernel and system processes, which generally have minimal impact on performance.", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill down into details by a major or minor OS version (for example, iOS 8, iOS 7.1.1, Android 4.2.2). Viewing the OS versions page one.newrelic.com > Mobile > (select an app) > App > OS versions: Use this page to view, sort, or drill down into detailed information about the top five operating system versions using your mobile app. To view performance details about the operating system versions for your mobile app users: Go to one.newrelic.com > Mobile > (select an app) > App > OS versions. To select the mobile app versions or time period, use the Versions menu and time picker below the UI menu bar. Optional: Select the Sort by and Hide < 1% throughput options. To expand or collapse the list of operating systems to include versions, select the operating system's name (for example, iOS 7). Viewing drill-down details To drill down into detailed information, use any of our standard user interface functions and page functions. In addition: To view details for the minor and point releases of a major OS version (including interaction time, HTTP request times, network failures, active devices, and slowest traces or all subversions), select a major OS version from the list. To view details for a specific OS version, select its name from the expanded OS list. To view trace details for a slow transaction (if available), select its link. For more information, see Interactions page. 
To return to the main OS versions page, select the Close (X) button.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.135895, + "_score": 95.48022, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor" + "title": "OS versions page", + "sections": "OS versions page", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill" }, - "id": "617d6d5d64441fb952fbcb5d" + "id": "603eaee9e7b9d260112a0809" } ], "/collectd/b0ba29c2-be55-4da2-bf24-f8cc3c27715c": [ @@ -20687,7 +20655,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.995674, + "_score": 126.224014, "_version": null, "_explanation": null, "sort": null, @@ -20733,7 +20701,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.16167, + "_score": 80.60483, "_version": null, "_explanation": null, "sort": null, @@ -20775,7 +20743,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.07054, + "_score": 68.37395, "_version": null, "_explanation": null, "sort": null, @@ -20832,7 +20800,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 60.36673, + "_score": 59.388138, "_version": null, "_explanation": null, "sort": null, @@ -20881,7 +20849,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39962, + "_score": 56.21035, "_version": null, "_explanation": null, "sort": null, @@ -20921,7 +20889,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.61609, + "_score": 189.5944, "_version": null, "_explanation": null, "sort": null, @@ -20962,7 +20930,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51884, + "_score": 189.49849, "_version": null, "_explanation": null, "sort": null, @@ -21003,7 +20971,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51877, + "_score": 189.49841, "_version": null, "_explanation": null, "sort": null, @@ -21044,7 +21012,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.51848, + "_score": 189.49814, "_version": null, "_explanation": null, "sort": null, @@ -21084,7 +21052,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.36101, + "_score": 141.20699, "_version": null, "_explanation": null, "sort": null, @@ -21131,7 +21099,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 263.92642, + "_score": 249.46866, "_version": null, "_explanation": null, "sort": null, @@ -21176,7 +21144,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 222.32343, + "_score": 
210.2872, "_version": null, "_explanation": null, "sort": null, @@ -21217,7 +21185,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 207.46317, + "_score": 204.84909, "_version": null, "_explanation": null, "sort": null, @@ -21230,70 +21198,76 @@ "id": "6071cfc864441fa88f9d8530" }, { - "image": "", - "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-ios/get-started/new-relic-ios-and-dt/", "sections": [ - "New Relic iOS mobile monitoring with distributed tracing", - "Requirements", - "How to set up distributed tracing", - "Enable Infinite Tracing", - "Find data", - "Troubleshooting" + "Trace API: Decorate spans with attributes", + "Why decorate your spans?", + "How to decorate your spans with attributes", + "JSON examples", + "New Relic-format attribute examples", + "Zipkin-format attribute examples" ], - "published_at": "2022-02-14T12:01:29Z", - "title": "New Relic iOS mobile monitoring with distributed tracing", - "updated_at": "2022-02-14T12:01:29Z", + "title": "Trace API: Decorate spans with attributes", "type": "docs", - "external_id": "4aadcb82ece4eea8c394437e2182adc90dc9d77d", + "tags": [ + "Trace API", + "Distributed tracing", + "Understand dependencies" + ], + "external_id": "1e5d907a844f32e60b4cb7db4d7d1728a22adcde", + "image": "", + "url": "https://docs.newrelic.com/docs/distributed-tracing/trace-api/trace-api-decorate-spans-attributes/", + "published_at": "2022-02-14T03:19:10Z", + "updated_at": "2021-04-10T16:18:16Z", "document_type": "page", "popularity": 1, - "body": "New Relic iOS mobile monitoring agents support distributed tracing. This lets you see how your mobile app activity connects to related services. Watch this short video (approx. 2:15 minutes) to learn how to: Find mobile distributed tracing data in New Relic. Filter by standard and custom attributes. Requirements To use distributed tracing, you need at least XCFramework agent version 7.3.0. We recommend you use the most recent agent. How to set up distributed tracing For mobile agents that support this feature, it’s enabled by default. If you prefer to turn off distributed tracing, see iOS feature flag. Enable Infinite Tracing If your mobile apps have some downstream services that use the advanced type of distributed tracing called Infinite Tracing, you should enable this feature for your mobile apps. This ensures that your root span (the initiating mobile app) is included in the rest of the spans tracked by Infinite Tracing. To set this up, see the Infinite Tracing steps to create a trace observer and to select which mobile apps (data sources) should report spans. Find data Mobile spans appear in any New Relic distributed tracing UI where those spans are part of a trace. Here are some tips for finding and querying data: You can find end-user-originating traces in any New Relic One distributed tracing UI. In the distributed tracing UI, end-user spans are indicated with the icon. To see a span's attributes, select a span in the UI. Spans are reported as Span data, and can be queried in New Relic. Query tips: Query by name by setting mobileApp.name to the mobile app name. Query for traces containing at least one mobile app span with mobileApp.name is not null. Query for traces containing at least one back-end app with appName is not null. Query for traces containing both mobile and back-end spans by combining the two previous conditions. 
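The query tips above translate into NRQL roughly as follows. These are illustrative sketches using only the attribute names named in the body (mobileApp.name, appName, trace.id), not an official query set, and 'My Mobile App' is a hypothetical app name:

    SELECT count(*) FROM Span WHERE mobileApp.name = 'My Mobile App' SINCE 1 hour ago
    SELECT uniques(trace.id) FROM Span WHERE mobileApp.name IS NOT NULL SINCE 1 hour ago
    SELECT uniques(trace.id) FROM Span WHERE appName IS NOT NULL SINCE 1 hour ago

The same sketches apply to the Android and iOS bodies further down, which repeat these query tips verbatim.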
Troubleshooting If you don't see end-user spans, or are having other distributed tracing issues, see Troubleshooting.", + "body": "This document explains how to add attributes to trace data sent to the Trace API so that spans display specific properties in the UI. Why decorate your spans? When you send data to our Trace API, you can add custom attributes to spans. For example, you might decide to add attributes like customer.id or user.id to help you analyze your trace data. Some expected attribute values cause our distributed tracing UI to display some specific trace properties and details. For example, if a span has an attribute with an error. prefix, the UI displays that span with an error. For another example, a span with an attribute that has a db. prefix will be displayed as a datastore span in the UI, and will have its datastore query highlighted. Decorating your spans to show specific properties in our UI can help you: Better understand the structure of your trace data. More easily troubleshoot problems. How to decorate your spans with attributes This table explains how to get spans sent to the Trace API to show up with specific properties in the UI. For property descriptions, see span properties. Desired span property UI indicator How to add property Errors Use an attribute with an error. prefix. For example: error.message. External Use an attribute with an http. prefix. For example: http.method. A span will also appear as an external span if it has a child that comes from a different entity. Datastore Use an attribute with a db. prefix. For example: db.statement. Service This cannot be done with an attribute. A span is classified as a service span if it's the root span, or if its parent is from a different entity. If a span has several properties, service span classification takes precedence in the UI. In-process This cannot be done with an attribute. A span is classified as in-process if it hasn't been classified as a service span, datastore span, or external span. For more on how these span properties are determined and stored, see Trace structure. Tips for adding attributes: You can add any attribute to a span. For example: you might add an attribute like customer.id so that you could search traces globally for traces containing a specific customer. A span can be in multiple categories. For example, external is a more general category than datastore, so if a span is classified as both external and datastore, it will be indicated as a datastore span in the UI. JSON examples Here are JSON examples showing how to use attributes to set span properties: New Relic-format attribute examples New Relic-format JSON with multiple types of attributes added. The significance of the custom attributes is described in customAttribute. [ { \"common\": { \"attributes\": { \"hostname\": \"cattle456.example.com\", \"environment\": \"staging\" } }, \"spans\": [ { \"id\": \"1-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 3.5, \"error.name\": \"StackOverflowException\", \"name\": \"/placeOrder [POST]\", \"customer.id\": \"datanerd@newrelic.com\", \"description\": \"This span is the root of the whole trace. It has no parent.id. Custom attributes like 'customer.id' can have any name. 
Using these kinds of attributes will allow you to do a global search across all traces for desired traces.\" } }, { \"id\": \"2-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 1, \"parent.id\": \"1-abcdefg\", \"db.query\": \"foo selection\", \"db.statement\": \"SELECT FOO FROM BAR\", \"name\": \"DB Span\", \"description\": \"This is a datastore span. The presence of one or more attributes prefixed with db. makes this display as a datastore span in the UI.\" } }, { \"id\": \"3-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"parent.id\": \"1-abcdefg\", \"duration.ms\": 1.5, \"http.method\": \"POST\", \"name\": \"HTTP Span\", \"description\": \"An external (HTTP) span. Spans with one or more attributes prefixed with http. are treated as external spans.\" } }, { \"id\": \"4-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 1.2, \"error.text\": \"404 file not found\", \"parent.id\": \"1-abcdefg\", \"http.method\": \"GET\", \"http.statusCode\": 404, \"name\": \"Error Http Span\", \"description\": \"Spans with error. prefixed attributes are displayed in red text in the UI. Errors can coexist with other span categories.\" } }, { \"id\": \"5-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"error.message\": \"404 file not found\", \"duration.ms\": 1.2, \"parent.id\": \"1-abcdefg\", \"http.method\": \"GET\", \"http.statusCode\": 404, \"db.query\": \"SELECT FOO FROM BAR\", \"name\": \"Error Http DB Span\", \"description\": \"Spans can have multiple properties. Relevant attributes are highlighted when you select a span to view its details.\" } }, { \"id\": \"6-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 1.6, \"parent.id\": \"1-abcdefg\", \"http.method\": \"GET\", \"db.query\": \"SELECT FOO FROM BAR\", \"name\": \"Http DB Span\", \"description\": \"External (HTTP) is a more general category than is datastore, so a span with both http.- and db.-prefixed attributes is displayed as a datastore span in the UI.\" } }, { \"id\": \"7-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 2.0, \"parent.id\": \"1-abcdefg\", \"description\": \"Spans with no explicit types that belong to the same entity as its parent and children are considered in-process spans.\", \"name\": \"In-process span 1\" } }, { \"id\": \"8-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 1.7, \"parent.id\": \"7-abcdefg\", \"name\": \"In-process span 2\", \"description\": \"In-process spans can represent a breakdown of work being done within a process.\" } }, { \"id\": \"9-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 1.0, \"parent.id\": \"8-abcdefg\", \"name\": \"In-process span 3\", \"description\": \"The number and granularity of in-process spans vary depending on instrumentation and frameworks being used.\" } }, { \"id\": \"10-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", \"duration.ms\": 2.2, \"parent.id\": \"1-abcdefg\", \"name\": \"In-process span\" } }, { \"id\": \"11-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Service\", 
\"duration.ms\": 2.2, \"parent.id\": \"10-abcdefg\", \"name\": \"External determined by entity change\", \"description\": \"A span that’s a parent to a span from another entity is displayed as an external span in the UI.\" } }, { \"id\": \"12-abcdefg\", \"trace.id\": \"abc123-xyz789\", \"attributes\": { \"service.name\": \"Order Notification Service\", \"duration.ms\": 1.8, \"parent.id\": \"11-abcdefg\", \"name\": \"Entry span determined by entity change\", \"description\": \"The attribute 'service.name' is used to detect process boundaries in the UI. For compatibility with data from Lambda monitoring and APM agents, the attribute 'entity.name' can be used to search across all traces.\" } } ] } ] Copy Zipkin-format attribute examples Zipkin-format JSON with multiple attribute types added. The significance of the attributes (key-value pairs) is described in customAttribute. [ { \"traceId\": \"zipkinSampleTrace\", \"id\": \"1\", \"kind\": \"SERVER\", \"name\": \"Error Span\", \"duration\": 35000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"customAttribute\": \"This span is the root of the whole trace. It has no parent.id\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"2\", \"parentId\": \"1\", \"kind\": \"SERVER\", \"name\": \"post\", \"duration\": 10000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"customAttribute\": \"This is a datastore span. The presence of one or more attributes prefixed with db. makes this display as a datastore span in the UI.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"2\", \"parentId\": \"1\", \"kind\": \"CLIENT\", \"name\": \"DB Span\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"db.query\": \"foo selection\", \"db.statement\": \"SELECT FOO FROM BAR\", \"customAttribute\": \"This is a datastore span. The presence of one or more attributes prefixed with db. makes this display as a datastore span in the UI.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"3\", \"parentId\": \"1\", \"kind\": \"CLIENT\", \"name\": \"HTTP Span\", \"duration\": 15000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"http.method\": \"POST\", \"customAttribute\": \"AAn external (HTTP) span. Spans with one or more attributes prefixed with http. are treated as external spans.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"4\", \"parentId\": \"1\", \"kind\": \"CLIENT\", \"name\": \"Error Span\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"error.message\": \"404 file not found\", \"http.method\": \"GET\", \"http.statusCode\": 404, \"customAttribute\": \"Spans with error. prefixed attributes are displayed in red text in the UI. Errors can coexist with other span categories.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"5\", \"parentId\": \"1\", \"kind\": \"CLIENT\", \"name\": \"HTTP Error DB Span\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"error.message\": \"404 file not found\", \"http.method\": \"GET\", \"http.statusCode\": 404, \"db.query\": \"SELECT FOO FROM BAR\", \"customAttribute\": \"Spans can have multiple properties. 
Relevant attributes are highlighted when you select a span to view its details.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"6\", \"parentId\": \"1\", \"kind\": \"CLIENT\", \"name\": \"HTTP DB Span\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"http.method\": \"GET\", \"db.query\": \"SELECT FOO FROM BAR\", \"customAttribute\": \"External (HTTP) is a more general category than is datastore, so a span with both http.- and db.-prefixed attributes is displayed as a datastore span in the UI.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"7\", \"parentId\": \"1\", \"kind\": \"SERVER\", \"name\": \"In process span 1\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"customAttribute\": \"Spans with no explicit types that belong to the same entity as its parent and children are considered in-process spans.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"8\", \"parentId\": \"7\", \"kind\": \"SERVER\", \"name\": \"In process span 2\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"customAttribute\": \"In-process spans can represent a breakdown of work being done within a process.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"9\", \"parentId\": \"8\", \"kind\": \"SERVER\", \"name\": \"In process span 2\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"customAttribute\": \"The number and granularity of in-process spans vary depending on instrumentation and frameworks being used.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"10\", \"parentId\": \"1\", \"kind\": \"CLIENT\", \"name\": \"In process remote parent\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"name\": \"in process remote parent\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"10\", \"parentId\": \"1\", \"kind\": \"CLIENT\", \"name\": \"In process remote parent\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"sampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"customAttribute\": \"A span that is a parent to a span from another entity will be displayed as an external span.\" } }, { \"traceId\": \"zipkinSampleTrace\", \"id\": \"11\", \"parentId\": \"10\", \"kind\": \"SERVER\", \"name\": \"Downstream entry span\", \"duration\": 12000, \"localEndpoint\": { \"serviceName\": \"downstreamSampleApp\", \"ipv4\": \"127.0.0.1\", \"port\": 8080 }, \"tags\": { \"customAttribute\": \"The attribute 'service.name' is used to detect process boundaries in the UI. For compatibility with data from Lambda monitoring and APM agents, the attribute 'entity.name' can be used to search across all traces.\" } } ] Copy", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 154.46526, + "_score": 148.60413, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "New Relic iOS mobile monitoring with distributed tracing", - "sections": "New Relic iOS mobile monitoring with distributed tracing", - "body": " of the spans tracked by Infinite Tracing. To set this up, see the Infinite Tracing steps to create a trace observer and to select which mobile apps (data sources) should report spans. 
Find data Mobile spans appear in any New Relic distributed tracing UI where those spans are part of a trace. Here" + "title": "Trace API: Decorate spans with attributes", + "sections": "Trace API: Decorate spans with attributes", + "tags": "Trace API", + "body": " customer.id or user.id in order to help you analyze your trace data. Some expected attribute values cause our distributed tracing UI to display some specific trace properties and details. For example, if a span has an attribute with an error. prefix, the UI displays that span with an error. For another" }, - "id": "61e46cfd28ccbc842dbc66bb" + "id": "6071cfc8196a678ed764a7b2" }, { "image": "", - "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-android/get-started/new-relic-android-and-dt/", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-ios/get-started/new-relic-ios-and-dt/", "sections": [ - "New Relic Android mobile monitoring with distributed tracing", + "New Relic iOS mobile monitoring with distributed tracing", "Requirements", "How to set up distributed tracing", "Enable Infinite Tracing", "Find data", "Troubleshooting" ], - "published_at": "2022-02-14T11:45:20Z", - "title": "New Relic Android mobile monitoring with distributed tracing", - "updated_at": "2022-02-14T11:45:20Z", + "published_at": "2022-02-14T12:01:29Z", + "title": "New Relic iOS mobile monitoring with distributed tracing", + "updated_at": "2022-02-14T12:01:29Z", "type": "docs", - "external_id": "fa29b98b23ba651deec9eef5c602eef7c57b8553", + "external_id": "4aadcb82ece4eea8c394437e2182adc90dc9d77d", "document_type": "page", "popularity": 1, - "body": "New Relic Android mobile monitoring agents support distributed tracing. This lets you see how your mobile app activity connects to related services. Watch this short video (approx. 2:15 minutes) to learn how to: Find mobile distributed tracing data in New Relic. Filter by standard and custom attributes. Requirements To use distributed tracing, you need at least Android agent version 6.0.0. We recommend you use the most recent agent. How to set up distributed tracing For mobile agents that support this feature, it’s enabled by default. If you prefer to turn off distributed tracing, see Android feature flag. Enable Infinite Tracing If your mobile apps have some downstream services that use the advanced type of distributed tracing called Infinite Tracing, you should enable this feature for your mobile apps. This ensures that your root span (the initiating mobile app) is included in the rest of the spans tracked by Infinite Tracing. To set this up, see the Infinite Tracing steps to create a trace observer and to select which mobile apps (data sources) should report spans. Find data Mobile spans appear in any New Relic distributed tracing UI where those spans are part of a trace. Here are some tips for finding and querying data: You can find end-user-originating traces in any New Relic One distributed tracing UI. In the distributed tracing UI, end-user spans are indicated with the icon. To see a span's attributes, select a span in the UI. Spans are reported as Span data, and can be queried in New Relic. Query tips: Query by name by setting mobileApp.name to the mobile app name. Query for traces containing at least one mobile app span with mobileApp.name is not null. Query for traces containing at least one back-end app with appName is not null. Query for traces containing both mobile and back-end spans by combining the two previous conditions. 
Troubleshooting If you don't see end-user spans, or are having other distributed tracing issues, see Troubleshooting.", + "body": "New Relic iOS mobile monitoring agents support distributed tracing. This lets you see how your mobile app activity connects to related services. Watch this short video (approx. 2:15 minutes) to learn how to: Find mobile distributed tracing data in New Relic. Filter by standard and custom attributes. Requirements To use distributed tracing, you need at least XCFramework agent version 7.3.0. We recommend you use the most recent agent. How to set up distributed tracing For mobile agents that support this feature, it’s enabled by default. If you prefer to turn off distributed tracing, see iOS feature flag. Enable Infinite Tracing If your mobile apps have some downstream services that use the advanced type of distributed tracing called Infinite Tracing, you should enable this feature for your mobile apps. This ensures that your root span (the initiating mobile app) is included in the rest of the spans tracked by Infinite Tracing. To set this up, see the Infinite Tracing steps to create a trace observer and to select which mobile apps (data sources) should report spans. Find data Mobile spans appear in any New Relic distributed tracing UI where those spans are part of a trace. Here are some tips for finding and querying data: You can find end-user-originating traces in any New Relic One distributed tracing UI. In the distributed tracing UI, end-user spans are indicated with the icon. To see a span's attributes, select a span in the UI. Spans are reported as Span data, and can be queried in New Relic. Query tips: Query by name by setting mobileApp.name to the mobile app name. Query for traces containing at least one mobile app span with mobileApp.name is not null. Query for traces containing at least one back-end app with appName is not null. Query for traces containing both mobile and back-end spans by combining the two previous conditions. Troubleshooting If you don't see end-user spans, or are having other distributed tracing issues, see Troubleshooting.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 154.35654, + "_score": 145.17197, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "New Relic Android mobile monitoring with distributed tracing", - "sections": "New Relic Android mobile monitoring with distributed tracing", - "body": " in the rest of the spans tracked by Infinite Tracing. To set this up, see the Infinite Tracing steps to create a trace observer and to select which mobile apps (data sources) should report spans. Find data Mobile spans appear in any New Relic distributed tracing UI where those spans are part of a trace. Here" + "title": "New Relic iOS mobile monitoring with distributed tracing", + "sections": "New Relic iOS mobile monitoring with distributed tracing", + "body": " of the spans tracked by Infinite Tracing. To set this up, see the Infinite Tracing steps to create a trace observer and to select which mobile apps (data sources) should report spans. Find data Mobile spans appear in any New Relic distributed tracing UI where those spans are part of a trace. 
Here" }, - "id": "61e46d23196a67a3b102a7ea" + "id": "61e46cfd28ccbc842dbc66bb" } ], "/silverlight/fe42916f-1756-4c93-af10-7236de6d0f6e": [ @@ -21328,7 +21302,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -21369,7 +21343,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -21410,7 +21384,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -21451,7 +21425,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -21492,7 +21466,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -21541,7 +21515,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 131.51285, + "_score": 122.78904, "_version": null, "_explanation": null, "sort": null, @@ -21584,7 +21558,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61218, "_version": null, "_explanation": null, "sort": null, @@ -21632,7 +21606,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27015, "_version": null, "_explanation": null, "sort": null, @@ -21674,7 +21648,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.266235, "_version": null, "_explanation": null, "sort": null, @@ -21717,7 +21691,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24198, "_version": null, "_explanation": null, "sort": null, @@ -21762,7 +21736,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 186.58925, + "_score": 173.72247, "_version": null, "_explanation": null, "sort": null, @@ -21805,7 +21779,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 186.58481, + "_score": 173.71902, "_version": null, "_explanation": null, "sort": null, @@ -21848,7 +21822,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 186.57661, + "_score": 173.71263, "_version": null, "_explanation": null, "sort": null, @@ -21895,7 +21869,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 176.53557, + "_score": 166.21199, "_version": null, "_explanation": null, "sort": null, @@ -21909,48 +21883,52 @@ }, { "sections": [ - "Azure Cosmos DB (Document DB) monitoring integration", + "Azure SQL Database monitoring integration", "Features", "Activate integration", "Configuration and polling", - "View and query data", + "Find and use data", "Metric data", - "Important", - "Account Data", - "DataBase Data", - "Collection Data", + "Database sample metrics", + "Elastic pool sample metrics", + "Server sample metrics", "Inventory data", - "EOL NOTICE" + "EOL 
NOTICE", + "azure/sql/database/", + "azure/sql/elasticpool/", + "azure/sql/firewall", + "azure/sql/replication-link/", + "azure/sql/restore-point/" ], - "title": "Azure Cosmos DB (Document DB) monitoring integration", + "title": "Azure SQL Database monitoring integration", "type": "docs", "tags": [ "Azure integrations list", "Microsoft Azure integrations", "Integrations" ], - "external_id": "e4bb0ee9204d3af8c336f3bccd58052df2451116", + "external_id": "713c9f87ea4feb3a597c6ac45d3bcde593f99ffc", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-cosmos-db-document-db-monitoring-integration/", - "published_at": "2022-02-15T19:28:51Z", - "updated_at": "2022-02-15T19:28:51Z", + "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-sql-database-monitoring-integration/", + "published_at": "2022-02-14T19:27:29Z", + "updated_at": "2022-02-14T19:27:29Z", "document_type": "page", "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure's Cosmos DB service that reports your Cosmos DB metrics and other data to New Relic. This document explains how to activate the Cosmos DB integration and describes the data that can be captured. Features New Relic gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. Activate integration To enable this integration follow standard procedures to activate your Azure service in New Relic. The Cosmos DB integration requires you to create an additional role and permission to fetch database and collection data: Go to the Azure Portal and open a shell by selecting the terminal icon. Add the following command: az role definition create --role-definition '{ \"Name\": \"NewRelic Integrations\", \"Actions\": [ \"*/read\", \"Microsoft.DocumentDB/databaseAccounts/listKeys/action\" ], \"NotActions\": [], \"AssignableScopes\": [ \"/subscriptions/YOUR_INSERT_SUBSCRIPTION_ID\" ], \"Description\": \"Read Only for NewRelic Integrations\", \"IsCustom\": \"true\" }' Copy From Services > Subscriptions, select the subscription, go to Access control (IAM), and then select Add. In the Role search box, add the name of the newly created role definition (for example, NewRelic Integrations). In the Select search box, add the name of the New Relic integration application, and select it. Ensure that the application is added to the Selected members list, then Save. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Cosmos DB integration: Polling interval: 5 minutes Resolution: 1 minute or 5 minutes, varies by metric. For resolution information on a specific metric, see Microsoft Azure's documentation about support metrics. View and query data To view your integration data, go to one.newrelic.com > Infrastructure > Azure and select the Cosmos DB Integration. You can query and explore your data using the following event types: Entity Event Type Provider Account AzureCosmosDbAccountSample AzureCosmosDbAccount Database AzureCosmosDbDatabaseSample AzureCosmosDbDatabase Collection AzureCosmosDbCollectionSample AzureCosmosDbCollection For more on how to find and use data, see Understand and use integration data. 
Metric data Important For information on deprecated Cosmos DB events or metrics, see Azure Cosmos DB integration (deprecated). We strongly recommend migrating to the supported events and metrics in this document. To view metrics reported by the Cosmos DB integration, query the Entities below. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. Account Data Metric Description Metadata totalRequests Total number of requests. account kind region offerType statusCode resourceGroup metadataRequests Count of metadata requests. account kind region offerType statusCode resourceGroup mongoRequests Count of Mongo requests made. account kind region commandName offerType errorCode resourceGroup mongoRequestCharge Total number of Mongo request units consumed. account kind region commandName offerType errorCode resourceGroup totalRequestUnits Total number of request units consumed. account kind region offerType statusCode resourceGroup provisionedThroughput Throughput provisioned for the database or collection. account offerType kind resourceGroup availableStorageBytes Total available storage, in bytes. account kind offerType region resourceGroup dataUsageBytes Total data usage reported, in bytes. account kind offerType region resourceGroup indexUsageBytes Total index usage reported, in bytes. account kind offerType region resourceGroup documentQuotaBytes Total storage quota reported, in bytes. account kind offerType region resourceGroup documentCount Total document count reported. account kind offerType region resourceGroup ReplicationLatency P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account kind sourceRegion offerType targetRegion resourceGroup ServiceAvailability Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraRequestCharges Total number of request units consumed for Cassandra requests. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraConnectionClosures Total number of Cassandra connections that were closed. account kind closureReason offerType region resourceGroup DataBase Data Metric Description Metadata totalRequests Total number of requests. account databaseName region statusCode metadataRequests Count of metadata requests. account databaseName region statusCode mongoRequests Count of Mongo requests made. account databaseName region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account databaseName region commandName errorCode totalRequestUnits Total number of request units consumed. account databaseName region statusCode provisionedThroughput Throughput provisioned for the database or collection. account databaseName availableStorageBytes Total available storage, in bytes. account databaseName region dataUsageBytes Total data usage reported, in bytes. account databaseName region indexUsageBytes Total index usage reported, in bytes. account databaseName region documentQuotaBytes Total storage quota reported, in bytes. account databaseName region documentCount Total document count reported. account databaseName region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. 
account sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account databaseName errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account databaseName errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account closureReason region Collection Data Metric Description Metadata totalRequests Total number of requests. account collectionName database region statusCode metadataRequests Count of metadata requests. account collectionName database region statusCode mongoRequests Count of Mongo requests made. account collectionName database region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account collectionName database region commandName errorCode totalRequestUnits Total number of request units consumed. account collectionName database region statusCode provisionedThroughput Throughput provisioned for the database or collection. account collectionName database availableStorageBytes Total available storage, in bytes. account collectionName database region dataUsageBytes Total data usage reported, in bytes. account collectionName database region indexUsageBytes Total index usage reported, in bytes. account collectionName database region documentQuotaBytes Total storage quota reported, in bytes. account collectionName database region documentCount Total document count reported. account collectionName database region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. 
account collectionName sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account collectionName database errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account collectionName database errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account collectionName closureReason region Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. For details on how to find and use inventory data, see Understand and use data. The Cosmos DB integration reports the inventory data for the entity type azure/cosmosdb/account/ using the following metadata: documentEndpoint: The document endpoint. databaseAccountOfferType: The database account offer type. consistencyPolicy: The consistency policy for the Cosmos DB database account. defaultConsistencyLevel: The default consistency level for the Cosmos DB database account. kind: The type of database account set at database account creation. resourceGroupName: The Azure resource group name that the Cosmos DB database account belongs to. regionName: The region name in which the Azure DocumentDB database account is deployed. type: The Azure resource type, which is Microsoft.DocumentDB/databaseAccounts.", + "body": "New Relic's integrations include an integration for reporting your Microsoft Azure SQL Database metrics and other data to New Relic. This document explains how to activate the integration and describes the data reported. Features New Relic gathers database data from Azure's fully-managed relational cloud database service. Azure SQL provides single databases with their own set of resources, and elastic pools that share a set of resources. Both are associated with an Azure SQL Database logical server. Databases can be protected by a firewall, and they can be replicated and restored to a previous point in time. Using New Relic, you can: View Azure SQL Database data in pre-built Infrastructure dashboards. Run custom queries and visualize the data. Create alert conditions to notify you of changes in data. Activate integration To enable the integration, follow standard procedures to activate your Azure service in New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. New Relic queries your Azure Database services according to a default polling interval, which varies depending on the integration. Polling frequency for the Azure SQL Database integration: Polling interval: 5 minutes Resolution: 1 data point per minute Find and use data To explore your integration data, go to one.newrelic.com > Infrastructure > Azure > (select an integration). Data is organized like this: Azure SQL Database data Organized in New Relic Single database Data about a single database is attached to AzureSqlDatabaseSample event type. Inventory data has a provider value of AzureSqlDatabase. Elastic pool Data about an elastic pool is attached to AzureSqlElasticPoolSample event type. Inventory data has a provider value of AzureSqlElasticPool. 
Firewall Data about a firewall is attached to AzureSqlFirewallSample event type. Inventory data has a provider value of AzureSqlFirewall. Database replication link Data about a database replication link is attached to AzureSqlReplicationLinkSample event type. Inventory data has a provider value of AzureSqlReplicationLink. Database restore point Data about a database restore point is attached to AzureSqlRestorePointSample event type. Inventory data has a provider value of AzureSqlRestorePoint. Logical server Data about a logical server is attached to AzureSqlServerSample event type. Inventory data has a provider value of AzureSqlServer. Metric data This integration collects the following metric data. Database sample metrics Metric Description cpuPercent Average CPU percentage. physicalDataReadPercent Average data IO percentage. logWritePercent Average log IO percentage. dtuConsumptionPercent Average DTU percentage. storage Total database size, in bytes. connectionSuccessful Total number of successful connections. connectionFailed Total number of failed connections. blockedByFirewall Total number of requests blocked by firewall. deadlock Total number of deadlocks. storagePercent Database size percentage. xtpStoragePercent Average in-memory OLTP storage percent. workersPercent Average workers percentage. sessionsPercent Average sessions percentage. dtuLimit Average number of DTU limit. dtuUsed Average number of used DTU. dwuLimit DWU limit. dwuConsumptionPercent Percentage of DWU. dwuUsed Number of used DWU. dwCpuPercent Average DW node level CPU percentage. dwPhysicalDataReadPercent Average DW node level data IO percentage. databaseSizeCurrentBytes Total current database size in bytes. databaseSizeLimitBytes Database limit in bytes. cpuLimit Average limit of CPUs. Applies to vCore-based databases. Elastic pool sample metrics Metric Description cpuPercent Average CPU percentage. databaseCpuPercent Average CPU percentage, per database. physicalDataReadPercent Average data IO percentage. databasePhysicalDataReadPercent Average data IO percentage, per database. logWritePercent Average log IO percentage. databaseLogWritePercent Average log IO percentage, per database. dtuConsumptionPercent Average DTU percentage. databaseDtuConsumptionPercent Average DTU percentage, per database. storagePercent Average storage percentage. workersPercent Average workers percentage. databaseWorkersPercent Average workers percentage, per database. sessionsPercent Average sessions percentage. databaseSessionsPercent Average sessions percentage, per database. eDTULimit Average DTU limit. storageLimitBytes Average storage limit, in bytes. eDTUUsed Average used eDTU. storageUsedBytes Average used storage, in bytes. databaseStorageUsedBytes Average used storage per database, in bytes. xtpStoragePercent Average in-memory OLTP storage percent. Server sample metrics Metric Description dtuCurrent Average utilization percentage relative to the DTU of the database. dtuLimit Database DTU limit. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. This integration collects the following inventory data about your system's state and configuration. 
azure/sql/database/ collation creationDate defaultSecondaryLocation earliestRestoreDate edition elasticPoolName isDataWarehouse maxSizeBytes name regionName resourceGroupName sqlServerName status transparentDataEncryptionStatus type azure/sql/elasticpool/ creationDate databaseDtuMax databaseDtuMin dtu edition name regionName resourceGroupName sqlServerName state storageMb type azure/sql/firewall endIpAddress name regionName resourceGroupName sqlServerName startIpAddress azure/sql/replication-link/ databaseName name partnerDatabase partnerLocation regionName replicationState resourceGroupName role sqlServerName startTime azure/sql/restore-point/ databaseName earliestRestoreDate name regionName resourceGroupName restorePointType sqlServerName", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 168.90613, + "_score": 158.10162, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Azure Cosmos DB (Document DB) monitoring integration", - "sections": "Azure Cosmos DB (Document DB) monitoring integration", + "title": "Azure SQL Database monitoring integration", + "sections": "Azure SQL Database monitoring integration", "tags": "Azure integrations list", - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure's Cosmos DB service that reports your Cosmos DB metrics and other data to New Relic. This document explains how to activate the Cosmos DB integration and describes the data that can be captured. Features New Relic" + "body": "/elasticpool/ creationDate databaseDtuMax databaseDtuMin dtu edition name regionName resourceGroupName sqlServerName state storageMb type azure/sql/firewall endIpAddress name regionName resourceGroupName sqlServerName startIpAddress azure/sql/replication-link/ databaseName name partnerDatabase" }, - "id": "617dc763e7b9d2d3dac0580e" + "id": "617d731628ccbc959d7fe804" } ], "/microsoft-teams-for-codestream/c1a50e3c-f81a-4af2-b2ab-878076c9cc82": [ @@ -21975,7 +21953,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.40949, + "_score": 96.35417, "_version": null, "_explanation": null, "sort": null, @@ -22008,7 +21986,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.24481, + "_score": 83.92136, "_version": null, "_explanation": null, "sort": null, @@ -22038,7 +22016,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.61232, + "_score": 65.84598, "_version": null, "_explanation": null, "sort": null, @@ -22066,7 +22044,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 64.885506, + "_score": 64.152176, "_version": null, "_explanation": null, "sort": null, @@ -22078,38 +22056,38 @@ "id": "61744075196a67f1f32f2ba9" }, { - "image": "https://docs.newrelic.com/static/693426d805c82f1d9155cd04b116c36e/d9199/new-relic-product-relationships.png", - "url": "https://docs.newrelic.com/docs/style-guide/capitalization/product-capability-feature-usage/", "sections": [ - "Product, capability, and feature capitalization and usage", - "Important", - "Trademark guidelines", - "When to use title case", - "Examples", - "What not to capitalize", - "What to avoid", - "Copyright and trademark notices", - "Relationships between products, features, and capabilities" + "CodeStream licenses" ], - "published_at": "2022-02-14T07:10:58Z", - "title": "Product, capability, and 
feature capitalization and usage", - "updated_at": "2022-02-13T14:59:44Z", + "title": "CodeStream licenses", "type": "docs", - "external_id": "eaaecce5ea26ad32376e01d481d138326154d094", + "tags": [ + "CodeStream", + "Product or service licenses", + "Licenses" + ], + "external_id": "331f3d1cd0897f453f98bc054fda09a9ad2725c1", + "image": "", + "url": "https://docs.newrelic.com/docs/licenses/product-or-service-licenses/codestream/codestream-licenses/", + "published_at": "2022-02-15T21:13:37Z", + "updated_at": "2021-10-23T17:05:38Z", "document_type": "page", "popularity": 1, - "body": "This page is the definitive resource for capitalizing products, features, and capabilities for use by all New Relic content creators. These guidelines have been vetted by members of legal, content and product marketing, and docs, as well as leaders from around New Relic. Visit the word-nerds slack channel if you have questions about or additions to the list. In general, the only things that we use title case for are our company name, product/platform name, and a few capabilities and integrations that require it for legal reasons. The following sections also call out first and subsequent uses of terms. First use refers to the first mention in the body copy. It's okay to use the subsequent versions in space-constrained areas such as titles, headers, tables, web navigation, the UI, social media posts, and so on. Do not use acronyms specific to or coined by New Relic externally; only use industry-recognized acronyms such as APM externally. Important Find capitalization guidelines for user types and product editions. Trademark guidelines New Relic trademarks are adjectives (brand names) modifying nouns (the generic product type). Do not make New Relic or its platform, capabilities, and so on possessive using apostrophes. For example, use the “functionality of New Relic One” or “the New Relic One functionality” instead of “New Relic One's functionality.” Do not pluralize New Relic or its platform, capabilities, and so on. Do not abbreviate or combine New Relic or its platform, capabilities, and so on. Do not hyphenate New Relic or its platform, capabilities, and so on, and do not allow them to break across a page line when used in text. Avoid confusion by clearly separating and identifying New Relic trademarks from other companies' names and/or trademarks. These trademark guidelines apply to other companies' trademarked names and products too. When to use title case You must use title case for our trademarked names including our company name plus our product/platform name and a few capability and integration names. Name What it is Use this Not this New Relic* our company First use: New Relic, Inc. (corporation/entity), New Relic® (printed assets), or New Relic (digital assets) Subsequent uses: New Relic, our company, we, or it Do not use: New Relic's, new relic, New relic, NR, their New Relic One our product/ our platform First use: New Relic One (docs, UI, titles) or New Relic One observability platform (marketing content) Subsequent uses: New Relic One or New Relic One platform Note: New Relic One observability platform is recommended for marketing content where users might not be familiar with our product. 
Do not use: New Relic One's, New Relic one, NR1 FutureStack* { Future } Stack* our annual user group conference First use: FutureStack® or { Future } Stack® (printed assets), or FutureStack or { Future } Stack (digital assets) Subsequent uses: FutureStack Do not use: Future Stack, Futurestack, Future stack NerdGraph* our GraphQL API First use: NerdGraph® (printed assets) or NerdGraph (digital assets) Subsequent uses: NerdGraph Do not use: Nerd Graph, Nerdgraph, nerdgraph, nerd graph Nerdlet* component of New Relic One apps; a specific UI view represented by a React JavaScript package First use: Nerdlet® (printed assets) or Nerdlet (digital assets) Subsequent uses: Nerdlet Do not use: nerdlet, NerdLet Nerdpack* component of New Relic One apps; the package containing all the files needed by that app First use: Nerdpack® (printed assets) or Nerdpack (digital assets) Subsequent uses: Nerdpack Do not use: nerdpack, NerdPack, Nerd Pack, nerd pack NerdStorage* component of New Relic One apps; used to store and retrieve simple sets of data First use: NerdStorage® (printed assets) or NerdStorage (digital assets) Subsequent uses: NerdStorage Do not use: Nerdstorage, nerdstorage, Nerd Storage, Nerd storage, nerd storage New Relic CodeStream IDE extension that integrates with New Relic One New Relic CodeStream (for the New Relic integration with CodeStream) or CodeStream (for just the CodeStream app) Do not use: New Relic CodeStream's, New Relic Code Stream, Code Stream New Relic Explorer capability of New Relic One First use: New Relic Explorer Subsequent uses: New Relic Explorer Describing actions in the UI: Explorer Do not use: New Relic Explorer's, Explorer (okay when directing what to select in the UI), explorer New Relic Infinite Tracing* our fully-managed, tail-based, distributed tracing solution First use: New Relic Infinite Tracing® (printed assets) or New Relic Infinite Tracing (digital assets) Subsequent uses: Infinite Tracing Do not use: Infinite tracing, infinite tracing, New Relic Edge with Infinite Tracing New Relic Instant Observability ecosystem of quickstarts for New Relic One First use: New Relic Instant Observability or New Relic Instant Observability (I/O) Subsequent uses: Instant Observability or New Relic I/O (avoid using the acronym externally, if possible) Do not use: New Relic instant observability, instant observability, NRIO, IO, I/O New Relic Lookout capability of New Relic One First use: New Relic Lookout Subsequent uses: New Relic Lookout Describing actions in the UI: Lookout Do not use: New Relic Lookout's, Lookout (okay when directing what to select in the UI), lookout New Relic Navigator capability of New Relic One First use: New Relic Navigator Subsequent uses: New Relic Navigator Describing actions in the UI: Navigator Do not use: New Relic Navigator's, Navigator (okay when directing what to select in the UI), navigator * Trademarked Examples New Relic is a registered trademark of New Relic, Inc. It was founded in 2008. We call our employees Relics. The New Relic support team can answer all of your questions about New Relic One. They're happy to help. The New Relic One observability platform lets you ingest data from practically any source. New Relic One gives you access to our curated UI experiences like application performance monitoring, browser monitoring, mobile monitoring, and more. 
Optimize code performance and feature planning with access to telemetry data from production and pre-production environments directly in your IDE via the New Relic CodeStream integration. New Relic Edge with Infinite Tracing is a fully managed, cloud-based solution. Infinite Tracing can analyze 100% of your trace data and choose the most actionable data. What not to capitalize Do not capitalize our capability and feature names (what you get with our platform) unless they begin a sentence (and then only capitalize the first word) or are included in the table above. If a capability or feature name includes the name of a trademarked product, then only capitalize the trademarked name (for example, Pixie or Kubernetes). Feature and capability defined: A feature is an individual experience or element of functionality in the New Relic One platform or a New Relic One capability. A capability is a collection of features that enable a customer to achieve a use case. A capability is considered a superset of features and often tends to be an outside-in term that customers associate with an existing category such as application performance monitoring, applied intelligence, infrastructure monitoring, and log management. In other words, capabilities are the things we'd treat as SKUs if we sold them all separately. Notes about features and capabilities: These are largely internal terms used so that we can discuss New Relic and its structure more clearly. For public resources, we should attempt to avoid these terms and their distinctions and simply talk about how something works. Note that this use of “capability” is different from how we define “capability” in the user management space. View a diagram of the relationship between our product, features, and capabilities. 
Name What it is Use this Not this alerts capability of New Relic One; detection and notification of issues alerts Do not use: Alerts anomaly detection feature of the applied intelligence capability in New Relic One that helps distinguish between typical and atypical system performance anomaly detection Do not use: Anomaly Detection, Anomaly detection application performance monitoring capability of New Relic One; using real-time data to track the uptime and reliability of an application First use: application performance monitoring (APM) Subsequent uses: application performance monitoring, APM, or application monitoring Do not use: Application Performance Management, Application Performance Monitoring, Application Monitoring applied intelligence capability of New Relic One; our AIOps solution; machine learning engine that reduces alert noise, correlates incidents, and automatically detects anomalies applied intelligence Do not use: Applied Intelligence, Applied intelligence, AI, AIOps automap feature of New Relic One; automatically displays relationships between entities in topology view automap Do not use: auto map, Auto Map, Auto map auto-telemetry with Pixie Pixie integration with New Relic One First use: auto-telemetry with Pixie Subsequent uses: auto-telemetry with Pixie, the Pixie integration with New Relic One, our Pixie integration, or the integration with Pixie Do not use: Pixie (okay if referring to px.dev and the open-source Pixie project), Pixie's, Auto-telemetry with Pixie browser monitoring capability of New Relic One; our real-user monitoring (RUM) solution along with mobile monitoring browser monitoring Do not use: Browser Monitoring, Browser monitoring containers a method to package software for deployment containers Do not use: Containers dashboards capability of New Relic One that uses NRQL to build custom visualizations dashboards Do not use: Dashboards data explorer feature of New Relic One; navigating data in New Relic One without NRQL know-how data explorer Do not use: Data Explorer, Data explorer data ingest bringing metrics, events, logs, and traces (MELT) data into New Relic One data ingest Do not use: Data Ingest, Data ingest digital experience monitoring a combo of New Relic One front-end monitoring capabilities (browser, mobile, synthetics) First use: digital experience monitoring (DEM) Subsequent uses: digital experience monitoring or DEM Do not use: Digital Experience Monitoring, Digital experience monitoring, digital monitoring distributed tracing feature of New Relic One; a solution for observing service requests as they flow through a distributed system distributed tracing Do not use: Distributed Tracing, Distributed tracing errors inbox capability of New Relic One; our error tracking solution for triaging and resolving full-stack errors errors inbox Do not use: Errors Inbox, Errors inbox event correlation feature of the applied intelligence capability in New Relic One that automatically groups alerts to decrease noise event correlation Do not use: Event Correlation, Event correlation incident intelligence feature of the applied intelligence capability in New Relic One that correlates incidents and offers suggested responders incident intelligence Do not use: Incident Intelligence, Incident intelligence infrastructure monitoring capability of New Relic One that collects performance data on hosts and servers (IT infrastructure) to understand health First use: infrastructure monitoring Subsequent uses: infrastructure monitoring, infra monitoring, or infra
(for space-constrained areas only) Do not use: Infrastructure Monitoring, Infrastructure monitoring Kubernetes cluster explorer feature of the Kubernetes monitoring capability that ​provides a multi-dimensional representation of a Kubernetes cluster and enables teams to drill down into Kubernetes data Kubernetes cluster explorer Do not use: Kubernetes Cluster Explorer, kubernetes cluster explorer Kubernetes monitoring capability of New Relic One; form of reporting that helps with proactive management of clusters Kubernetes monitoring Do not use: Kubernetes Monitoring, kubernetes monitoring microservices modern application architecture (vs. monolith) microservices Do not use: micro services, Micro Services, Microservices integrations solutions that integrate with/gather data from third parties; all our integrations can be found as quickstarts in New Relic Instant Observability integrations Do not use: Integrations log management capability of New Relic One; collecting, formatting, and analyzing log data to optimize systems First use: log management Subsequent uses: log management or logs Do not use: Log Management, Log management, Logs logs in context feature of the log management capability in New Relic One; tracing logs throughout a complex service logs in context Do not use: Logs in Context, Logs in context metrics, events, logs, and traces what you monitor for full-stack observability First use: metrics, events, logs, and traces or metrics, events, logs, and traces (MELT) Subsequent uses: metrics, events, logs, and traces or MELT Do not use: Metrics, Events, Logs, and Traces mobile monitoring capability of New Relic One; our RUM solution along with browser monitoring mobile monitoring Do not use: Mobile Monitoring, Mobile monitoring model performance monitoring capability of New Relic One; our solution for MLOps; observability for machine learning (ML) models in production model performance monitoring Do not use: Model Performance Monitoring, Model performance monitoring, ML model monitoring, ML model performance monitoring, MPM network performance monitoring capability of New Relic One; understanding how a network is experienced by users First use: network performance monitoring or network performance monitoring (NPM) Subsequent uses: network performance monitoring, NPM, or network monitoring Do not use: Network Performance Monitoring, Network performance monitoring, Network Monitoring, Network monitoring observability methodology for understanding a complex system First use: observability or observability (o11y) Subsequent uses: observability, o11y, full-stack observability, or end-to-end observability Do not use: Observability, O11y, Full-Stack Observability, Full-stack Observability, Full-stack observability query, queries, querying feature of New Relic One; NRQL- or Prom-QL-style way of asking bespoke questions of data query, queries, or querying Do not use: Query, Queries, Querying query builder feature of New Relic One; previously known as chart builder query builder Do not use: Query Builder, Query builder quickstarts feature of New Relic Instant Observability; pre-built open-source integrations that include dashboards and alerts quickstarts Do not use: quick starts, Quick Starts, QuickStarts, Quickstarts serverless monitoring capability of New Relic One for Lambda and serverless functions serverless monitoring Do not use: Serverless Monitoring, Serverless monitoring service maps feature of New Relic One; visual representation of a service service maps Do not use: Service Maps, 
Service maps synthetic monitoring capability of New Relic One; simulating users across geographies to identify bottlenecks or experience issues; aka synthetic tests for APIs or browsers First use: synthetic monitoring Subsequent uses: synthetic monitoring or synthetics or synthetic monitors Do not use: synthetics monitoring, Synthetic Monitoring, Synthetic monitoring If you don't see a feature or capability in one of the above tables, assume that it is not capitalized. Examples Application performance monitoring (APM) helps you instantly understand application performance, dependencies, and bottlenecks. APM gives you a complete view of your applications and operating environment. Covered entities can now send application, infrastructure, digital experience, and network monitoring data to New Relic One while maintaining HIPAA compliance. When you need to correlate log data with other telemetry data, enable logs in context in New Relic One. NRQL is a query language you can use to query the New Relic database. With a quickstart, you can quickly install dashboards, alerts, and other resources. What to avoid Avoid the use of our deprecated names such as old product SKUs. Name What it is Use this Not this Applied Intelligence formerly a separate product—now a capability of New Relic One applied intelligence Do not use: Applied Intelligence, AI, AIOps Full-Stack Observability formerly a separate product—now in lowercase, it describes an outcome of using New Relic One full-stack observability Do not use: Full-Stack Observability, Full-stack Observability, Full Stack Observability, full stack observability, FSO Telemetry Data Platform formerly a separate product—now part of New Relic One telemetry data platform (avoid this term altogether when possible) Do not use: Telemetry Data Platform, Telemetry data platform, TDP Examples Engineers can use applied intelligence to detect, diagnose, and mitigate problems more quickly and easily. A set of dashboards with data from all New Relic products gives you full-stack observability of your metrics, events, logs, and traces. Copyright and trademark notices Downloadable or printable documents that are available to the public—including customer-, partner-, and reseller-facing documents—require a copyright disclaimer in the footer for all registered and unregistered trademarks used within the document. In any instance where the registration marks are not used in downloadable/printable documents, include the following statement in the copyright area of the footer: © 2008-22 New Relic, Inc. All rights reserved. New Relic and the New Relic logo are registered trademarks of New Relic, Inc. All product and company names herein may be trademarks of their registered owners. Update the copyright year to reflect the current year. For purely internal documents, neither the copyright nor the trademark notices are required because we are not publishing the documents or putting third parties on notice. Instead, add the following disclaimer to the footer: New Relic confidential; for internal use only You should also add the word “internal” to the file name. Relationships between products, features, and capabilities This is not an exhaustive diagram, but it provides a model for how our features and capabilities fit together into our product.", + "body": "We love open-source software, and we use the following with CodeStream. Thank you, open-source community, for making these fine tools!
Some of these are listed under multiple software licenses, and in that case we have listed the license we've chosen to use. CodeStream license on GitHub CodeStream's third-party software notices on GitHub", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 64.19933, + "_score": 63.995792, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " CodeStream IDE extension that integrates with New Relic One New Relic CodeStream (for the New Relic integration with CodeStream) or CodeStream (for just the CodeStream app) Do not use: New Relic CodeStream's, New Relic Code Stream, Code Stream New Relic Explorer capability of New Relic One First use: New" + "title": "CodeStream licenses", + "sections": "CodeStream licenses", + "tags": "CodeStream", + "body": "We love open-source software, and we use the following with CodeStream. Thank you, open-source community, for making these fine tools! Some of these are listed under multiple software licenses, and in that case we have listed the license we've chosen to use. CodeStream license on GitHub CodeStream's third-party software notices on GitHub" }, - "id": "61fd071728ccbc7880c0c1b5" + "id": "617440e2196a677ea62f0193" } ], "/consul/b65825cc-faee-47b5-8d7c-6d60d6ab3c59": [ @@ -22149,7 +22127,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.16167, + "_score": 80.60483, "_version": null, "_explanation": null, "sort": null, @@ -22191,7 +22169,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.07054, + "_score": 68.37395, "_version": null, "_explanation": null, "sort": null, @@ -22248,7 +22226,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 60.36673, + "_score": 59.388138, "_version": null, "_explanation": null, "sort": null, @@ -22297,7 +22275,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39962, + "_score": 56.21035, "_version": null, "_explanation": null, "sort": null, @@ -22332,7 +22310,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39747, + "_score": 56.20861, "_version": null, "_explanation": null, "sort": null, @@ -22378,7 +22356,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 76.61319, "_version": null, "_explanation": null, "sort": null, @@ -22427,7 +22405,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.28016, "_version": null, "_explanation": null, "sort": null, @@ -22475,7 +22453,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.63191, + "_score": 76.27115, "_version": null, "_explanation": null, "sort": null, @@ -22517,7 +22495,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.62691, + "_score": 76.267235, "_version": null, "_explanation": null, "sort": null, @@ -22560,7 +22538,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.5959, + "_score": 76.24298, "_version": null, "_explanation": null, "sort": null, @@ -22605,7 +22583,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59576, + "_score": 102.69997, 
"_version": null, "_explanation": null, "sort": null, @@ -22647,7 +22625,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75423, + "_score": 101.91959, "_version": null, "_explanation": null, "sort": null, @@ -22689,7 +22667,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.746796, + "_score": 101.91382, "_version": null, "_explanation": null, "sort": null, @@ -22731,7 +22709,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93259, + "_score": 98.52919, "_version": null, "_explanation": null, "sort": null, @@ -22774,7 +22752,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.1855, + "_score": 95.34475, "_version": null, "_explanation": null, "sort": null, @@ -22812,7 +22790,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.10144, + "_score": 82.97359, "_version": null, "_explanation": null, "sort": null, @@ -22840,7 +22818,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.91729, + "_score": 81.52136, "_version": null, "_explanation": null, "sort": null, @@ -22873,7 +22851,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 74.78172, + "_score": 74.35149, "_version": null, "_explanation": null, "sort": null, @@ -22914,7 +22892,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.027794, + "_score": 71.01109, "_version": null, "_explanation": null, "sort": null, @@ -22950,7 +22928,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.388878, + "_score": 59.5029, "_version": null, "_explanation": null, "sort": null, @@ -22994,7 +22972,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59576, + "_score": 102.69989, "_version": null, "_explanation": null, "sort": null, @@ -23036,7 +23014,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75423, + "_score": 101.91952, "_version": null, "_explanation": null, "sort": null, @@ -23078,7 +23056,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.746796, + "_score": 101.91374, "_version": null, "_explanation": null, "sort": null, @@ -23120,7 +23098,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93259, + "_score": 98.52911, "_version": null, "_explanation": null, "sort": null, @@ -23163,7 +23141,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.1855, + "_score": 95.344696, "_version": null, "_explanation": null, "sort": null, @@ -23199,7 +23177,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 13.423264, + "_score": 13.77377, "_version": null, "_explanation": null, "sort": null, @@ -23235,7 +23213,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65002, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -23271,7 +23249,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86832, + 
"_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -23317,7 +23295,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31323, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -23356,7 +23334,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.95203, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -23402,7 +23380,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.11005, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -23447,7 +23425,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -23488,7 +23466,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23529,7 +23507,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23570,7 +23548,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23611,7 +23589,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23654,7 +23632,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -23695,7 +23673,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23736,7 +23714,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23777,7 +23755,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23818,7 +23796,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23861,7 +23839,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -23902,7 +23880,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23943,7 +23921,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -23984,7 +23962,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 
92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -24025,7 +24003,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -24060,7 +24038,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 333.56525, + "_score": 313.2794, "_version": null, "_explanation": null, "sort": null, @@ -24100,7 +24078,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 319.93628, + "_score": 300.44778, "_version": null, "_explanation": null, "sort": null, @@ -24136,7 +24114,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.84717, + "_score": 274.43045, "_version": null, "_explanation": null, "sort": null, @@ -24182,7 +24160,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.28598, + "_score": 266.06723, "_version": null, "_explanation": null, "sort": null, @@ -24221,7 +24199,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.92676, + "_score": 264.79858, "_version": null, "_explanation": null, "sort": null, @@ -24258,7 +24236,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65027, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -24294,7 +24272,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -24340,7 +24318,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -24379,7 +24357,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -24425,7 +24403,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.1103, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -24465,7 +24443,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 245.43628, + "_score": 232.31778, "_version": null, "_explanation": null, "sort": null, @@ -24488,7 +24466,7 @@ "3. Triage your errors", "4. Manage your triaged errors" ], - "published_at": "2022-02-15T01:40:34Z", + "published_at": "2022-02-16T01:43:31Z", "title": "Resolve Errors Faster with Full Stack Error Tracking", "updated_at": "2022-02-12T01:48:33Z", "type": "developer", @@ -24499,7 +24477,7 @@ "body": "You're one of the developers of an eCommerce website called Geek's Movie Shop, and recently, you introduced some new features. Before you push your changes to production where all your users will have access to them, you want to discover as many errors as you can in your development environment. Then you can decide which ones to fix and which ones to ignore. Errors Inbox is the perfect tool to help you do this. 
Learning Objectives In this lab, you: Spin up Geek's Movie Shop in your development environment Set up a workload for Errors Inbox Resolve and ignore errors in your inbox Assign unresolved errors Filter errors in your inbox by status Integrate Errors Inbox with Jira, CodeStream, or Slack Requirements Create a free New Relic account Install Docker Procedures 1. Spin up your application Set up your environment to deploy Geek's Movie Shop. 5 min 2. Set up Errors Inbox Set up Errors Inbox in New Relic 5 min 3. Triage your errors Track and triage errors across your stack with Errors Inbox 5 min 4. Manage your triaged errors Manage your triaged errors in Errors Inbox 5 min", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 210.78061, + "_score": 200.53262, "_version": null, "_explanation": null, "sort": null, @@ -24547,7 +24525,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 162.52324, + "_score": 159.9733, "_version": null, "_explanation": null, "sort": null, @@ -24582,7 +24560,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 156.96213, + "_score": 154.99075, "_version": null, "_explanation": null, "sort": null, @@ -24594,37 +24572,38 @@ "id": "617cbd54e7b9d28f12c0535e" }, { - "image": "https://docs.newrelic.com/static/595e873b1bfe20aa5d39a66adf18d0c9/f96db/RequestFeedback.png", - "url": "https://docs.newrelic.com/docs/codestream/how-use-codestream/request-feedback/", + "image": "https://docs.newrelic.com/static/3c5d34598b67191429a2a95f8f7b1895/c1b63/error-ide.png", + "url": "https://docs.newrelic.com/docs/codestream/start-here/what-is-codestream/", "sections": [ - "Request feedback on CodeStream", - "Request feedback", - "Tip", - "Provide feedback", - "Comments and change requests", - "Add more code changes" + "Intro to New Relic CodeStream", + "Preview release", + "Discuss code just like commenting on a Google Doc", + "Get feedback on work-in-progress with pre-PR code review", + "Create and review pull requests", + "Monitor your code’s performance in production", + "See your errors and what's causing them" ], - "published_at": "2022-02-14T04:37:01Z", - "title": "Request feedback on CodeStream", - "updated_at": "2022-02-14T04:37:01Z", + "published_at": "2022-02-14T04:56:00Z", + "title": "Intro to New Relic CodeStream", + "updated_at": "2021-12-15T01:41:52Z", "type": "docs", - "external_id": "752fa4dd9516d616b763d1552836fc30a93ccc7b", + "external_id": "0b3f4199050df98161ce8c46259a8bad30269d72", "document_type": "page", "popularity": 1, - "body": "New Relic CodeStream's feedback requests are powerful enough to use for traditional end-of-cycle code reviews, but at the same time they're so easy and flexible that you can use them throughout the development process to get quick feedback on work-in-progress. You can even use feedback requests for your uncommitted changes. Traditional code review happens at the end of the development cycle, when you’re looking to get the changes merged. Not only are end-of-cycle code reviews much more burdensome on your teammates, but you run the risk of identifying issues so late in the game that you end up having to decide between blowing up your schedule or taking on technical debt.
Whether you’re at the beginning of a project, with just some stubbed out functions, are mid-way through a work in progress, or are ready for a final review of a finished project, CodeStream enables feedback at any point during the development cycle. CodeStream handles the complexity of sharing your current status, including pushed commits, local commits, and staged and saved changes. Your teammates can provide feedback from their IDE, with no need to switch applications, and no need to switch branches or pull changes. By the time you get to the formal code review/pull request at the end of the development cycle, it’s far less painful and more of a formality because issues have been raised, discussed, and resolved all along the way. Request feedback To request feedback at any time, regardless of the current state of your work, click the + Create button at the top of the CodeStream pane, or the + Request Feedback button in the header of the Feedback Requests section. You can also use a keyboard shortcut (ctrl+shift+/ r or ctrl+/ r on a Mac). With a single click you can name the feedback request based on the last commit message, the branch name, or, if you started work by selecting a ticket, the ticket title. CodeStream assumes that you are requesting feedback on changes in the repository/branch of the file you've currently selected in your editor. If you have multiple repositories open in your IDE, you can change this via the repository dropdown at the very top of the feedback request form. Depending on your organization's settings, CodeStream may suggest specific reviewers. Based on the commit history of the code being changed, the suggestions may even include someone that isn't yet on your CodeStream team. In that case, they'd be notified by email. Hover over a reviewer’s name to see more details or to remove them. If multiple reviewers are assigned you may also have the option to determine whether any of them can approve the review or if each one has to approve it individually. The Changed Files section lists all of the files that have been added, removed, or modified. Click any file to view a diff if you want to review your changes before submitting the feedback request. If you have a file that’s not suitable for review, such as a checked-in binary file, you can hover over any file and click the x to exclude that file from the feedback request. That file will be moved to a list below the form. New files are, by default, excluded from the feedback request, but you can hover over their entry in the list and click + to add them. Hover over an excluded file and click the trashcan to permanently exclude it from all future feedback requests. Permanently excluding files creates a .codestreamignore file in the repository (see the example sketch after this entry). If you think your teammates will also want to exclude these files (for example, a package-lock.json or other system-generated file), you can commit and push the file so that they can make use of it as well. The changes represented across the selected files are broken out into four different categories, allowing you to select exactly what you would like to include in the feedback request. This includes changes that haven't been pushed, or even committed. The four categories are: Saved changes Staged changes Local commits Pushed commits Commits are listed in descending order across the Local Commits and Pushed Commits sections. If you uncheck the box for a commit, it will automatically uncheck the boxes for all of its preceding commits.
In other words, the commits included in the feedback request must be consecutive. Only your commits are checked by default, but you can include any of them in your review. Tip Make sure the email address in your git configuration matches your CodeStream email address. Or set up a blame map to map your git email address to your CodeStream email address. Optionally, you can share your feedback request out to either Slack or Microsoft Teams. When you submit your feedback request, your teammates will be notified via the activity feed, with anyone assigned as a reviewer being @mentioned so that they’ll also receive an email notification. Provide feedback The best part of CodeStream's feedback requests is that having your teammates look over your code doesn't put any extra burden on them. There's no need for them to set aside their own work to switch branches or pull changes and no need for them to leave their IDE. As long as they have the appropriate repository, they can open the feedback request and start reviewing your changes. Click any file in the Changed Files section to review the changes. The changes are presented with a diff in your editor. You can step through the changes in the file using your IDE's native navigation or click the up/down arrows at the top of your IDE. For JetBrains IDEs, CodeStream only supports the side-by-side diff viewer. Typically, the diff will represent the changes in the branch associated with the feedback request (such as, a feature/topic branch) against the base branch, at the point at which the feature branch was created. With CodeStream diffs this may not always be the case, because the developer may not have included all of their changes in the feedback request. As a result, the version of the files that the changes are being diff’ed against may, in fact, also include changes that aren’t in the base branch. This is important in order to provide continuity. Comments and change requests If you have a general comment about the changes, add a reply to the feedback request's thread. If you want to comment on the actual changes, select some code from the right side of the diff and then click the comment button that appears in the CodeStream pane next to your selection. You can also use a keyboard shortcut (ctrl+shift+/ c or ctrl+/ c on a Mac) after selecting some code. Since you have the full file context, you aren’t limited to commenting on just the lines of code that were changed. For example, you might notice another part of the file that needs work as well or that you simply want to reference. Whether it’s a general comment or a comment on code, you can mark it as a change request to let the developer know that it’s required before you’ll approve the changes. While you're providing feedback, you can even comment on files that aren't part of the changeset and they'll get added as a reply to the review. This is helpful to be able to point your teammate to another location in the codebase that might need improving. All of the change requests associated with the feedback request are summarized in a section at the top, in addition to being part of the discussion thread. This is where they'll get marked complete when the work is done. Look for the green and red buttons at the top to either approve the changes or request additional changes. If there are any open change requests, the approve button will be replaced by a blue button that shows the number. You can still approve the changes, but we wanted to make sure you were aware of the outstanding work.
![A screenshot showing outstanding changes](/images/ApproveWithChgReqs3.png \"Outstanding changes\") When there are multiple reviewers, and an approval is required from each, CodeStream makes it very clear when there are still outstanding approvals. The blue button at the top right shows how many approvals are outstanding. The green thumbs up on the headshots of reviewers indicates that they've already approved your changes. Add more code changes A typical workflow involves the reviewer leaving some comments or suggesting some changes and then the developer responding to that feedback with more changes to the code. To continue the process, click the blue Amend button to add your changes. Similar to when you originally submitted the feedback request, you can choose from your saved and staged changes and your local and pushed commits. Any open change requests are also listed so you can mark off any that are addressed by your update. By default, when the reviewer goes back into the feedback request, they’ll be looking at the complete changeset (such as, changes across all updates) as they go through the diffs for each file. They can also view the diffs for any individual update. The feedback review process can continue across as many updates as needed to get to the final approval of your changes. Once the feedback request has been approved, you can create a pull request from within CodeStream to get your code merged. Tip The feedback request can't be amended or reopened once a pull request has been created.", + "body": "New Relic CodeStream is a developer collaboration platform that enables your development team to discuss and review code in a natural and contextual way. CodeStream not only makes your discussions easier, by allowing them to happen in context in your IDE, but it also preserves the institutional knowledge that is currently being lost in Slack channels and emails. Not only that, our observability solutions take you from finding errors to fixing them, all within your IDE. A quick overview of how you can use New Relic CodeStream to discover, troubleshoot, and triage errors in your IDE. (2:27) If you haven't already, sign up for a free New Relic account so that you can get the most out of New Relic CodeStream. Preview release CodeStream's integration with New Relic One is a preview release limited to New Relic One accounts on our US data center, and your use is subject to the pre-release policy. (This does not apply to all other CodeStream functionality.) Discuss code just like commenting on a Google Doc Simply select a block of code and type your question or comment. Teammates can participate in the discussion right from their IDE and you can optionally share the discussion on Slack or Microsoft Teams so teammates can participate from their chat clients as well. Select some code and then click the add comment button. CodeStream turns conversation into documentation by capturing all of the discussion about your code and saving it with your code. And the real magic is that the discussions are automatically repositioned as your code changes, even across branches. All with zero effort on your part. Get feedback on work-in-progress with pre-PR code review CodeStream's lightweight feedback requests let you have someone look over your changes regardless of the current state of your repo, without the friction of committing, pushing, or issuing a pull request. Once you've made some changes to a file, in the Feedback requests section, click the + button to request feedback on that change.
Your teammates can review your changes right in their IDE, with full file context, and with no need to set aside their current work to switch branches or pull the latest. Use code comments to respond to a feedback request on a change. CodeStream’s feedback requests are so easy that you can start doing them throughout the development process instead of waiting until the end. You’re a few days into a sprint and have some work stubbed out? Maybe some work that hasn’t even been committed? Request feedback on your work in progress so that you can identify and resolve issues early instead of saving those gotchas for when you need to get the code merged. Create and review pull requests For most development teams, the final step in the development process is a pull request. Even if your team has decided to use CodeStream's feedback requests as a replacement for, and not just a precursor to, your end-of-cycle PR-based code reviews, you can create and review pull requests right inside your IDE. CodeStream shows a diff view of all the files changed in a PR. Review and approve the PR as you would on GitHub. Monitor your code’s performance in production Your pursuit of software quality doesn’t end once the code has been merged. Connect CodeStream to your New Relic One account and you can either jump from an error on New Relic One into your IDE or you can discover errors in CodeStream's Observability section. Navigate the stack trace to find the offending code and collaborate with your teammates to resolve the issue. Once you've connected New Relic CodeStream to your repositories and are observing your code's performance, use the observability section to find errors and collaborate with your team on solving them. See your errors and what's causing them After you connect CodeStream and New Relic, use workloads and errors inbox to jump to the offending code in your IDE. Once you've connected CodeStream to your repositories and configured it to connect with New Relic One, you can use errors inbox to find an error and then jump to that error in your IDE and the branch of your repository that's generating the error.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 141.6868, + "_score": 135.90573, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Request feedback on CodeStream", - "sections": "Request feedback on CodeStream", - "body": " during the development cycle. CodeStream handles the complexity of sharing your current status, including pushed commits, local commits, and staged and saved changes. Your teammates can provide feedback from their IDE, with no need to switch applications, and no need to switch branches or pull" + "title": "Intro to New Relic CodeStream", + "sections": "Intro to New Relic CodeStream", + "body": ". Once you've connected CodeStream to your repositories and configured it to connect with New Relic One, you can use errors inbox to find an error and then jump to that error in your IDE and the branch of your repository that's generating the error."
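For illustration only: here is what the .codestreamignore file mentioned in the feedback-request doc above might contain. The docs shown here do not spell out its format, so this sketch assumes .gitignore-style patterns; only package-lock.json comes from the text above, the other entries are made up:

```gitignore
# Hypothetical .codestreamignore contents, assuming .gitignore-style glob patterns.
# package-lock.json is the example named in the doc; the rest are invented.
package-lock.json
dist/
*.min.js
```

As the doc suggests for package-lock.json, committing this file would let teammates share the same exclusions.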
}, - "id": "6174403d28ccbc9b20c6cca0" + "id": "617440e3e7b9d2836c13c43c" } ], "/gcp-cloud-run/161abdd1-8d62-49c7-84cf-3b3f3abb59ef": [ @@ -24651,7 +24630,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 323.27817, + "_score": 303.6427, "_version": null, "_explanation": null, "sort": null, @@ -24687,7 +24666,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.84695, + "_score": 274.43045, "_version": null, "_explanation": null, "sort": null, @@ -24733,7 +24712,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.2857, + "_score": 266.06723, "_version": null, "_explanation": null, "sort": null, @@ -24772,7 +24751,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.92648, + "_score": 264.79858, "_version": null, "_explanation": null, "sort": null, @@ -24818,7 +24797,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.08276, + "_score": 263.12634, "_version": null, "_explanation": null, "sort": null, @@ -24855,7 +24834,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65027, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -24891,7 +24870,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -24937,7 +24916,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -24976,7 +24955,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -25022,7 +25001,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.1103, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -25067,7 +25046,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -25108,7 +25087,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25149,7 +25128,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25190,7 +25169,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25231,7 +25210,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25266,7 +25245,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65027, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -25302,7 +25281,7 @@ "info": "", "_index": 
"520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -25348,7 +25327,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -25387,7 +25366,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -25433,7 +25412,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.1103, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -25478,7 +25457,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -25519,7 +25498,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25560,7 +25539,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25601,7 +25580,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25642,7 +25621,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25685,7 +25664,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -25726,7 +25705,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25767,7 +25746,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25808,7 +25787,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25849,7 +25828,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -25896,7 +25875,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30765, + "_score": 168.5256, "_version": null, "_explanation": null, "sort": null, @@ -25947,7 +25926,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30219, + "_score": 168.52115, "_version": null, "_explanation": null, "sort": null, @@ -26005,7 +25984,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.08542, + "_score": 142.9581, "_version": null, "_explanation": null, "sort": null, @@ -26059,7 +26038,7 @@ "info": "", 
"_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 144.11206, + "_score": 136.87427, "_version": null, "_explanation": null, "sort": null, @@ -26100,7 +26079,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 143.57901, + "_score": 136.43945, "_version": null, "_explanation": null, "sort": null, @@ -26142,7 +26121,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 170.66867, + "_score": 167.69897, "_version": null, "_explanation": null, "sort": null, @@ -26180,7 +26159,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.04024, + "_score": 125.053764, "_version": null, "_explanation": null, "sort": null, @@ -26220,7 +26199,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 124.82577, + "_score": 124.92693, "_version": null, "_explanation": null, "sort": null, @@ -26257,7 +26236,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.15205, + "_score": 116.225136, "_version": null, "_explanation": null, "sort": null, @@ -26292,7 +26271,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 111.488815, + "_score": 111.64672, "_version": null, "_explanation": null, "sort": null, @@ -26340,7 +26319,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 129.47281, + "_score": 128.03642, "_version": null, "_explanation": null, "sort": null, @@ -26379,7 +26358,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 121.09124, + "_score": 120.536, "_version": null, "_explanation": null, "sort": null, @@ -26410,7 +26389,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.92081, + "_score": 118.37877, "_version": null, "_explanation": null, "sort": null, @@ -26453,7 +26432,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 117.15877, + "_score": 116.63826, "_version": null, "_explanation": null, "sort": null, @@ -26467,54 +26446,47 @@ }, { "sections": [ - "Install the Kubernetes integration using Helm", - "Compatibility and requirements", - "Install Kubernetes integration with Helm", - "Installing and configuring nri-bundle with Helm", - "Tip", - "Install with Helm 2 and nri-bundle (legacy)", - "Installation instructions for Helm 2", - "Important", - "Helm configuration options", - "Configure the integration", - "Change the scrape interval", - "Upgrade using Helm", - "Monitor services running on Kubernetes", - "Use your Kubernetes data", - "Reduce data ingest", - "New Relic Infrastructure", - "Prometheus OpenMetrics Integration", - "New Relic Logging", - "New Relic Pixie Integration", - "Uninstall Kubernetes integration" + "Set up your Prometheus remote write integration", + "Set up the integration", + "Map Prometheus and New Relic metric types", + "Override metric type mappings", + "Set allow or deny lists for sent metrics", + "Customize remote write behavior", + "X-License Key", + "prometheus_server URL parameter", + "Optimize throughput and memory consumption", + "Troubleshoot error messages", + "Remove the integration" ], - "title": "Install the Kubernetes integration using Helm", + "title": "Set up your Prometheus remote write integration", "type": 
"docs", "tags": [ - "Installation", - "Kubernetes integration", + "Install and configure remote write", + "Prometheus integrations", "Integrations" ], - "external_id": "29d42af98d41e7e4e7be86f0150254e132cb6b6a", + "external_id": "2b83e518967d4375d0530d239067a0c49c42ad3a", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/installation/install-kubernetes-integration-using-helm/", - "published_at": "2022-02-15T20:53:53Z", - "updated_at": "2022-02-15T20:53:53Z", + "url": "https://docs.newrelic.com/docs/infrastructure/prometheus-integrations/install-configure-remote-write/set-your-prometheus-remote-write-integration/", + "published_at": "2022-02-14T10:24:51Z", + "updated_at": "2022-01-22T08:48:16Z", "document_type": "page", "popularity": 1, - "body": "Helm is a package manager on top of Kubernetes. It facilitates installation, upgrades, or revision tracking, and it manages dependencies for the services that you install in Kubernetes. To install the integration using Helm, we recommend our Kubernetes automated installer, which will prompt for some configuration options and autopopulate secrets and values for you. Additionally, our automated installer also allows installing our integration as plain manifests rather than a Helm release. See Kubernetes integration: install and configure for more details about how to use our automated installer. Start the installer This page describes in more depth how to install and configure the New Relic integration without using the automated installer. Compatibility and requirements Make sure Helm is installed on your machine. Version 3 of the Kubernetes Integration requires Helm version 3. If you are still using Helm 2, you can still install the legacy version of the integration. To install the Kubernetes integration using Helm, you will need your New Relic account license key and your Kubernetes cluster's name: Find and copy your New Relic license key. Choose a display name for your cluster. For example, you could use the output of: bash Copy $ kubectl config current-context Note these values somewhere safe, as you will need them later during the installation process. Install Kubernetes integration with Helm New Relic has several charts for the different components which offer different features for the platform: newrelic-infrastructure-v3: Contains the main Kubernetes integration and the infrastructure agent. This is the core component for the New Relic Kubernetes experience, responsible for reporting most of the data that is surfaced in the Kubernetes Dashboard and the Kubernetes Cluster Explorer. newrelic-logging: Provides a DaemonSet with New Relic's Fluent Bit output plugin to easily forward your logs to New Relic. nri-kube-events: Collects and reports cluster events (such as kubectl get events) to New Relic. nri-prometheus: New Relic's Prometheus OpenMetrics Integration, automatically scrapes Prometheus endpoints present in the cluser and reports metrics to New Relic. nri-metadata-injection: Sets up a minimal MutatingAdmissionWebhook that injects a couple of environment variables in the containers. These contain metadata about the cluster and New Relic installation and will be later picked up by applications instrumented using APM, allowing to correlate APM and infrastructure data. nri-statsd: New Relic StatsD integration. For convenience, New Relic provides the nri-bundle chart, which pulls a selectable set of the charts mentioned above. 
nri-bundle can also install Kube State Metrics and Pixie for you if needed. While it is possible to install those charts separately, we strongly recommend using the nri-bundle chart for Kubernetes deployments, as it ensures that values across all the charts are consistent and provides full control over which components are installed, as well as the possibility to configure all of them as Helm dependencies. This is the same chart that is used and referenced by our automated installer. Installing and configuring nri-bundle with Helm Ensure you are using the appropriate context in the machine where you will run Helm and kubectl: You can check the available contexts with: bash Copy $ kubectl config get-contexts And switch to the desired context using: bash Copy $ kubectl config use-context _CONTEXT_NAME_ Add the New Relic Helm charts repo: bash Copy $ helm repo add newrelic https://helm-charts.newrelic.com Create a file named values-newrelic.yaml, which will be used to define your configuration: global: licenseKey: _YOUR_NEW_RELIC_LICENSE_KEY_ cluster: _K8S_CLUSTER_NAME_ prometheus: # Automatically scrape prometheus metrics for annotated services in the cluster # Collecting prometheus metrics for large clusters might impact data usage significantly enabled: true lowDataMode: true # Skip ingesting cluster-level metrics webhook: # Deploy our webhook to link APM and Kubernetes entities enabled: true kubeEvents: # Report Kubernetes events enabled: true logging: # Report logs for containers running in the cluster enabled: true ksm: # Deploy kube-state-metrics in the cluster. # Set this to true unless it is already deployed. enabled: true Copy Make sure everything is configured properly in the chart by running the following command. Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel \\ > --dry-run \\ > --debug Tip By specifying --devel, you will be installing the version 3 of our solution, currently in Beta and scheduled to be generally available during Spring 2022. We strongly encourage you to try it out as it includes significant improvements over the v2. See what's changed. Please notice and adjust the following flags: global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY: Must be set to a valid License Key for your account. global.cluster=K8S_CLUSTER_NAME: Is used to identify the cluster in the New Relic UI, so should be a descriptive value not used by any other Kubernetes cluster configured in your New Relic account. ksm.enabled=true: Setting this to true will automatically install Kube State Metrics (KSM) for you, which is required for our integration to run. You can set this to false if KSM is already present in your cluster, even if it is on a different namespace. prometheus.enabled=true: Will deploy our Prometheus OpenMetrics integration, which automatically collects data from Prometheus endpoints present in the cluster. webhook.enabled=true: Will install our minimal webhook, which adds environment variables that, in turn, allows linking applications instrumented with New Relic APM to Kubernetes. Our Kubernetes charts have a comprehensive set of flags and tunables that can be edited to better fit your particular needs. Please check the Configure the integration section below to see what can be changed. 
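If you'd like to go a step beyond --dry-run, Helm can also render the manifests locally so you can review exactly what would be applied. A hedged sketch, assuming the values-newrelic.yaml from above (helm template renders on your machine and does not install anything):

$ helm template newrelic newrelic/nri-bundle \
>   --namespace newrelic \
>   -f values-newrelic.yaml \
>   --devel > rendered.yaml                  # write the fully rendered manifests to a local file for review
$ grep -c _K8S_CLUSTER_NAME_ rendered.yaml   # spot-check that the cluster name you set was picked up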
Install the Kubernetes integration by running the command without --debug and --dry-run: bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel Check that pods are being deployed and reach a stable state: bash Copy $ kubectl -n newrelic get pods -w You should see: One newrelic-nrk8s-ksm pod. One newrelic-nrk8s-kubelet pod for each node in your cluster. One newrelic-nrk8s-control-plane pod for each master node in your cluster, if any. One newrelic-kube-state-metrics pod, if you included KSM with our installation. One newrelic-nri-kube-events pod, if you enabled Kubernetes events reporting. One newrelic-nri-prometheus pod, if you enabled the Prometheus integration. One newrelic-newrelic-logging pod for each node in your cluster, if you enabled the Logging integration. Install with Helm 2 and nri-bundle (legacy) Installation instructions for Helm 2 Important The last version of the nri-bundle chart supporting Helm 2 is 3.2.11. Please upgrade to Helm 3 to install the latest version. We will be removing support for Helm 2 installations soon. To install using Helm 2: Add the New Relic Helm charts repo: bash Copy $ helm repo add newrelic https://helm-charts.newrelic.com Create a namespace for newrelic: bash Copy $ kubectl create namespace newrelic Make sure everything is configured properly in the chart by running the following command. This step uses the --dry-run and --debug switches and therefore the agent is not installed. bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --version 3.2.11 \\ > --dry-run \\ > --debug \\ > --namespace newrelic \\ > --set global.licenseKey=_YOUR_NEW_RELIC_LICENSE_KEY_ \\ > --set global.cluster=_K8S_CLUSTER_NAME_ \\ > --set ksm.enabled=true \\ > --set newrelic-infrastructure.privileged=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true Install the New Relic Kubernetes integration by running the same command without --dry-run and --debug: bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --version 3.2.11 \\ > --namespace newrelic \\ > --set global.licenseKey=_YOUR_NEW_RELIC_LICENSE_KEY_ \\ > --set global.cluster=_K8S_CLUSTER_NAME_ \\ > --set ksm.enabled=true \\ > --set newrelic-infrastructure.privileged=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true Wait a few seconds, then check that the DaemonSet and pods have been created: Check that pods are being deployed and reach a stable state: bash Copy $ kubectl -n newrelic get pods -w Make sure you see a DaemonSet, and one pod per node. Helm configuration options When you install or upgrade the Kubernetes integration with Helm using the command line, you can pass your configuration variables with the --set flag. bash Copy $ helm install newrelic/newrelic-infrastructure \\ > --set licenseKey=_YOUR_NEW_RELIC_LICENSE_KEY_ \\ > --set cluster=_YOUR_CLUSTER_NAME_ A full list of the configuration parameters can be found in the newrelic-infrastructure chart README. Configure the integration Our nri-bundle chart, whose installation instructions can be found above, acts as a wrapper or a meta-package for a couple of other charts, which are the ones containing the components for our solution.
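When working with a wrapper chart like this, it can help to confirm which values a deployed release actually received before changing them. A small sketch using standard Helm commands (the release and namespace names follow the examples above):

$ helm get values newrelic -n newrelic         # only the values you supplied, including subchart sections
$ helm get values newrelic -n newrelic --all   # those values merged with the chart defaults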
By offering such a wrapper we can provide a controlled set of our components with versions that we know are compatible with each other, while keeping the components' charts relatively simple. To configure the individual integration components, you must use Helm's dependency system, which simply means that configuration for the child charts must be put under a section with the name of said chart. For example, to configure the newrelic-infrastructure chart, you would add the following to the values-newrelic.yaml: global: licenseKey: _YOUR_NEW_RELIC_LICENSE_KEY_ cluster: _K8S_CLUSTER_NAME_ # ... Other settings as shown above # Configuration for newrelic-infrastructure newrelic-infrastructure: verboseLog: true # Enable debug logs privileged: false # Install with minimal privileges # Other options from https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-infrastructure-v3 Copy The full list of flags that can be tweaked can be found in our chart's repository: newrelic-infrastructure Configure debug logs, privilege mode, control plane monitoring, etc. nri-kube-events nri-metadata-injection Configure how the webhook for APM linkage is deployed. nri-prometheus Configure which Prometheus endpoints are scraped. newrelic-logging Configure which logs are sent to New Relic. Tip Remember that when specifying options for these charts, you must put them under the chart name in your values-newrelic.yaml. Change the scrape interval The Kubernetes Integration v3 and above allows changing the interval at which metrics are gathered from the cluster. This allows choosing a tradeoff between data resolution and usage. We recommend choosing an interval between 15 and 30 seconds for optimal experience. To change the scrape interval, add the following to your values-newrelic.yaml, under the newrelic-infrastructure section: common: config: interval: 25s Copy So it ends up looking like: global: licenseKey: _YOUR_NEW_RELIC_LICENSE_KEY_ cluster: _K8S_CLUSTER_NAME_ # ... Other settings as shown above # Configuration for newrelic-infrastructure newrelic-infrastructure: # ... Other settings as shown above common: config: interval: 25s Copy Important Setting interval to values larger than 40s is not allowed. A full list of the settings that can be modified can be found at the chart's README. Upgrade using Helm To update your Kubernetes integration installed via Helm: Update the local chart repository: bash Copy $ helm repo update Update the release by re-running the appropriate helm upgrade --install ... command from the section above: bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel Monitor services running on Kubernetes After installing our Kubernetes integration, you can start instrumenting the services that run in your cluster. To learn more about how to do this, please check our Monitor services running on Kubernetes page. Use your Kubernetes data To learn more about how to use your Kubernetes data, please head to our detailed Find and use your Kubernetes data pages. Reduce data ingest Our charts support setting an option to reduce the amount of data ingested at the cost of dropping detailed information. To enable it, set global.lowDataMode to true in the nri-bundle chart. lowDataMode affects three specific components of the nri-bundle chart outlined below. New Relic Infrastructure If lowDataMode is enabled, the default scrape interval changes from 15s to 30s.
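Putting the interval and ingest options together, a sketch of a values-newrelic.yaml that enables lowDataMode globally while pinning an explicit scrape interval might look like this (key names follow the sections above; the 25s value is illustrative and must stay at or below 40s):

global:
  licenseKey: _YOUR_NEW_RELIC_LICENSE_KEY_
  cluster: _K8S_CLUSTER_NAME_
  lowDataMode: true  # reduces ingest for the three components described here
newrelic-infrastructure:
  common:
    config:
      interval: 25s  # an explicit interval takes precedence over lowDataMode's 30s default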
You can also specify a custom value for it using config.interval, which will take precedence over lowDataMode. Prometheus OpenMetrics Integration If lowDataMode is enabled, the following metrics are excluded by default as they are already collected and used by the New Relic Kubernetes Integration. - kube_ - container_ - machine_ - cadvisor_ Copy New Relic Logging If lowDataMode is enabled, Labels and Annotations are set to Off in the Filter section of the fluent-bit.conf file. This means that this detail will be dropped from the container log files, which reduces the overall data ingest into New Relic. The following fields are retained: Allowlist_key container_name Allowlist_key namespace_name Allowlist_key pod_name Allowlist_key stream Allowlist_key log Copy Low Data Mode Log Example Complete Log Record [ { \"cluster_name\": \"api-test\", \"kubernetes\": { \"annotations\": { \"kubernetes.io/psp\": \"eks.privileged\" }, \"container_hash\": \"fryckbos/test@sha256:5b098eaf3c7d5b3585eb10cebee63665b6208bea31ef31a3f0856c5ffdda644b\", \"container_image\": \"fryckbos/test:latest\", \"container_name\": \"newrelic-logging\", \"docker_id\": \"134e1daf63761baa15e035b08b7aea04518a0f0e50af4215131a50c6a379a072\", \"host\": \"ip-192-168-17-123.ec2.internal\", \"labels\": { \"app\": \"newrelic-logging\", \"app.kubernetes.io/name\": \"newrelic-logging\", \"controller-revision-hash\": \"84db95db86\", \"pod-template-generation\": \"1\", \"release\": \"nri-bundle\" }, \"namespace_name\": \"nrlogs\", \"pod_id\": \"54556e3e-719c-46b5-af69-020b75d69bf1\", \"pod_name\": \"nri-bundle-newrelic-logging-jxnbj\" }, \"message\": \"[2021/09/14 12:30:49] [ info] [engine] started (pid=1)\\n\", \"plugin\": { \"source\": \"kubernetes\", \"type\": \"fluent-bit\", \"version\": \"1.8.1\" }, \"stream\": \"stderr\", \"time\": \"2021-09-14T12:30:49.138824971Z\", \"timestamp\": 1631622649138 } ] Copy Log Record after enabling lowDataMode. [ { \"cluster_name\": \"api-test\", \"container_name\": \"newrelic-logging\", \"namespace_name\": \"nrlogs\", \"pod_name\": \"nri-bundle-newrelic-logging-jxnbj\", \"message\": \"[2021/09/14 12:30:49] [ info] [engine] started (pid=1)\\n\", \"stream\": \"stderr\", \"timestamp\": 1631622649138 } ] Copy New Relic Pixie Integration If lowDataMode is enabled, the newrelic-pixie integration performs heavier sampling on Pixie spans and lengthens the collection interval from 10 seconds to 15 seconds. lowDataMode settings: HTTP_SPAN_LIMIT: 750 DB_SPAN_LIMIT: 250 COLLECT_INTERVAL_SEC: 15 Copy The default settings for these parameters and others can be found in the newrelic-pixie-integration GitHub repo. Uninstall Kubernetes integration To uninstall the Kubernetes integration using Helm, run the following command: bash Copy $ helm uninstall newrelic -n newrelic", "body": "You can get Prometheus data flowing in New Relic with just a few simple steps. Once you integrate, your data will be visible in query-based dashboards (and other query results), often within about five minutes. This page covers basic setup for the remote write integration, as well as a few common troubleshooting topics. For information on integrating Prometheus servers in a high availability (HA) configuration, see our Prometheus high availability documentation. Set up the integration Go to the Prometheus remote write setup launcher in New Relic One, and complete these steps to add Prometheus data. Add Prometheus data Enter a name for the Prometheus server to be connected and your remote_write URL.
Important: The name you enter for the server will create an attribute on your data. It will also be the name that identifies which Prometheus server is sending data to New Relic. Add a new remote_write URL to your Prometheus YML file. Add this information under global_config in the file, at the same indentation level as the global section. Use the following syntax: Prometheus v2.26 and newer remote_write: - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=YOUR_DATA_SOURCE_NAME authorization: credentials: YOUR_LICENSE_KEY Copy Prometheus older than v2.26 remote_write: - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=YOUR_DATA_SOURCE_NAME bearer_token:YOUR_LICENSE_KEY Copy OR Any Prometheus version remote_write: - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=YOUR_LICENSE_KEY&prometheus_server=YOUR_DATA_SOURCE_NAME Copy This approach passes credentials in the URL. We don't recommend using it unless one of these other approaches doesn't work in your environment. European Union accounts: If you're connecting from the EU, use the following URL: https://metric-api.eu.newrelic.com/prometheus/v1/write Copy Kubernetes and Helm remote write integrations: Add the remote write URL to your Helm values.yaml file. Replace remoteWrite: [] with two lines similar to the following example. Be sure to use your remote write URL and use indentation that matches the rest of the file: remoteWrite: - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=YOUR_DATA_SOURCE_NAME bearer_token:YOUR_LICENSE_KEY Copy Restart your Prometheus server. View your data in the New Relic UI. For example, use the remote write dashboard we automatically create when you set up your integration. Map Prometheus and New Relic metric types The Prometheus remote write protocol doesn't include metric type information or other helpful metric metadata when sending metrics to New Relic, so we infer the metric type based on Prometheus naming conventions. Metrics not following these naming conventions may not be mapped correctly. We map Prometheus metrics types into New Relic metric types based on Prometheus metric naming conventions as follows: metricName_bucket is stored as a New Relic count metric type. metricName_count is stored as a New Relic count metric type. metricName_total is stored as a New Relic count metric type. metricName_sum is stored as a New Relic summary metric type. Everything else is stored as a New Relic gauge metric type. Override metric type mappings If you have metrics that don't follow Prometheus naming conventions, you can configure remote write to tag the metric with a newrelic_metric_type label that indicates the metric type. This label is stripped when received by New Relic. Example: You have a counter metric named my_counter, which does not have our naming convention suffix of _bucket, _count or _total. In this situation, your metric would be identified as a gauge rather than a counter. To correct this, add the following relabel configuration to your prometheus.yml: - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=... write_relabel_configs: - source_labels: [__name__] regex: ^my_counter$ target_label: newrelic_metric_type replacement: \"counter\" action: replace Copy This rule matches any metric with the name my_counter and adds a newrelic_metric_type label that identifies it as a counter. You can use the following (case sensitive!) 
values as the replacement value: counter gauge summary When a newrelic_metric_type label is present on a metric received and set to one of the valid values, New Relic will assign the indicated type to the metric (and strip the label) before downstream consumption in the data pipeline. If you have multiple metrics that don't follow the above naming conventions, you can add multiple rules with each rule matching different source labels. Set allow or deny lists for sent metrics If you need greater control over the data you send to New Relic, you can send a subset of your metrics. To do this, configure remote write with the write_relabel_configs parameter and a subparameter action value of keep or drop. In this example, you'll only send the metrics that match the regular expression. Unmatched metrics won't be sent. Alternatively, you can use action: drop to drop all of the metrics that match the regular expression. - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=... write_relabel_configs: - source_labels: [__name__] regex: \"coredns_(.*)|etcd_(.*)\" action: keep Copy This Kubernetes example uses this Helm chart's values.yaml file. If you're using a different Helm chart, please check its remoteWrite documentation (for example, some Helm files use camelcase writeRelabelConfigs instead). remoteWrite: - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=... write_relabel_configs: - source_labels: [__name__] regex: \"coredns_(.*)|etcd_(.*)\" action: keep Copy Customize remote write behavior You can customize the following parameters if you are writing to more than one account in New Relic or are connecting more than one Prometheus data source to the same account in New Relic. For more information, see the docs on remote write tuning. X-License Key Your account's license key is not an API key. The license key is used for authentication and to identify which account to write data into. If you are configuring Prometheus to write into different New Relic accounts, use a different key on each remote write URL. prometheus_server URL parameter The prometheus_server parameter is a label or attribute added to the stats that are written to NRDB. Use this same label when configuring your Grafana data source to limit results to just those from a particular prometheus_server. Optimize throughput and memory consumption Remote write increases the total memory consumption of your Prometheus servers. If you're experiencing issues, we recommend the following: Increase max_samples_per_send for higher throughput workloads, along with a proportional increase in capacity. If memory consumption is still a problem, try limiting the number of max_shards per server. Troubleshoot error messages If you receive an integration error message from New Relic or error messages in your Prometheus server logs after restarting your Prometheus server, review our remote write troubleshooting documentation. This includes fixing common errors, such as missing or incorrect characters, bad requests, request entity too large, and rate limit errors. Remove the integration When you remove the Prometheus remote write integration, this stops new data from flowing, but it will not purge or remove any historical data.
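For reference, the throughput and memory settings discussed above are set per remote_write entry in prometheus.yml under queue_config. This is a hedged sketch with illustrative starting numbers, not tuned recommendations:

remote_write:
  - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=YOUR_DATA_SOURCE_NAME
    authorization:
      credentials: YOUR_LICENSE_KEY  # Prometheus v2.26+ syntax, as shown earlier
    queue_config:
      max_samples_per_send: 1000  # raise for higher-throughput workloads...
      capacity: 5000              # ...along with a proportional increase in per-shard capacity
      max_shards: 100             # lower this if memory consumption remains a problem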
To remove the integration, remove the configuration code snippet from your Prometheus YML file, then restart the server.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 100.9886, + "_score": 95.261734, "_version": null, "_explanation": null, "sort": null, "highlight": { - "sections": "Prometheus OpenMetrics Integration", - "body": " and the Kubernetes Cluster Explorer. newrelic-logging: Provides a DaemonSet with New Relic's Fluent Bit output plugin to easily forward your logs to New Relic. nri-kube-events: Collects and reports cluster events (such as kubectl get events) to New Relic. nri-prometheus: New Relic's Prometheus OpenMetrics" + "title": "Set up your Prometheus remote write integration", + "sections": "Set up your Prometheus remote write integration", + "tags": "Prometheus integrations", + "body": " parameter The prometheus_server parameter is a label or attribute used to add to stats that are written to NRDB. Use this same label when configuring your Grafana data source to limit results to just those from a particular prometheus_server. Optimize throughput and memory consumption Remote write" }, - "id": "617d5841196a67bb40f7c1de" + "id": "617d515264441fc9eafbe18f" } ], "/gcp-firebase-database/8737e244-282b-41a8-a25b-f527c40eaa4d": [ @@ -26541,7 +26513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65027, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -26577,7 +26549,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -26623,7 +26595,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -26662,7 +26634,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -26708,7 +26680,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.1103, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -26754,7 +26726,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.28247, + "_score": 55.379017, "_version": null, "_explanation": null, "sort": null, @@ -26799,7 +26771,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.28148, + "_score": 55.378212, "_version": null, "_explanation": null, "sort": null, @@ -26837,7 +26809,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 45.105595, + "_score": 44.534454, "_version": null, "_explanation": null, "sort": null, @@ -26880,7 +26852,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 42.988182, + "_score": 42.366783, "_version": null, "_explanation": null, "sort": null, @@ -26925,7 +26897,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 42.068123, + "_score": 41.471107, "_version": null, "_explanation": null, "sort": null, @@ -26961,7 +26933,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65002, + 
"_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -26997,7 +26969,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86832, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -27043,7 +27015,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31323, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -27082,7 +27054,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.95203, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -27128,7 +27100,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.11005, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -27165,7 +27137,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 78.399506, + "_score": 73.24777, "_version": null, "_explanation": null, "sort": null, @@ -27174,6 +27146,41 @@ }, "id": "617db48e64441f3722fbe764" }, + { + "sections": [ + "CloudFoundry", + "What's included", + "CloudFoundry installation docs" + ], + "title": "CloudFoundry", + "type": "quickstarts", + "tags": [ + "containers", + "kubernetes", + "k8s" + ], + "quick_start_name": "CloudFoundry", + "external_id": "c1637f37b21c87a03c1f1153f1a78fea7bc00c66", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/cloudfoundry/944e2662-5ff7-4989-b9a6-66a200401d1e/", + "published_at": "2022-02-04T02:10:01Z", + "updated_at": "2021-10-05T01:50:06Z", + "document_type": "page", + "popularity": 1, + "body": "What's included CloudFoundry installation docs Create, deploy, and manage microservice applications in Kubernetes containers. Doc Create, deploy, and manage microservice applications in Kubernetes containers. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo CloudFoundry observability quickstart contains 1 data source . This is how you'll get your data into New Relic. CloudFoundry installation docs Create, deploy, and manage microservice applications in Kubernetes containers. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 66.27654, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "containers", + "body": "What's included CloudFoundry installation docs Create, deploy, and manage microservice applications in Kubernetes containers. Doc Create, deploy, and manage microservice applications in Kubernetes containers. This quickstart doesn't include any dashboards . Do you think it should? 
You can edit" + }, + "id": "61566bc528ccbc1ce8f21454" + }, { "sections": [ "Amazon EMR monitoring integration", @@ -27207,7 +27214,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 67.26387, + "_score": 63.529423, "_version": null, "_explanation": null, "sort": null, @@ -27216,41 +27223,6 @@ }, "id": "617da7ad28ccbc182c7fec49" }, - { - "sections": [ - "CloudFoundry", - "What's included", - "CloudFoundry installation docs" - ], - "title": "CloudFoundry", - "type": "quickstarts", - "tags": [ - "containers", - "kubernetes", - "k8s" - ], - "quick_start_name": "CloudFoundry", - "external_id": "c1637f37b21c87a03c1f1153f1a78fea7bc00c66", - "image": "", - "url": "https://developer.newrelic.com/instant-observability/cloudfoundry/944e2662-5ff7-4989-b9a6-66a200401d1e/", - "published_at": "2022-02-04T02:10:01Z", - "updated_at": "2021-10-05T01:50:06Z", - "document_type": "page", - "popularity": 1, - "body": "What's included CloudFoundry installation docs Create, deploy, and manage microservice applications in Kubernetes containers. Doc Create, deploy, and manage microservice applications in Kubernetes containers. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo CloudFoundry observability quickstart contains 1 data source . This is how you'll get your data into New Relic. CloudFoundry installation docs Create, deploy, and manage microservice applications in Kubernetes containers. Docs", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.17399, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "tags": "containers", - "body": "What's included CloudFoundry installation docs Create, deploy, and manage microservice applications in Kubernetes containers. Doc Create, deploy, and manage microservice applications in Kubernetes containers. This quickstart doesn't include any dashboards . Do you think it should? 
You can edit" - }, - "id": "61566bc528ccbc1ce8f21454" - }, { "sections": [ "Amazon ECS/ECR monitoring integration", @@ -27287,7 +27259,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 64.914055, + "_score": 60.529343, "_version": null, "_explanation": null, "sort": null, @@ -27323,7 +27295,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 61.309082, + "_score": 60.1861, "_version": null, "_explanation": null, "sort": null, @@ -27366,7 +27338,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -27407,7 +27379,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27448,7 +27420,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27489,7 +27461,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27530,7 +27502,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27568,7 +27540,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.00305, + "_score": 82.84308, "_version": null, "_explanation": null, "sort": null, @@ -27606,7 +27578,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 70.97598, + "_score": 70.94992, "_version": null, "_explanation": null, "sort": null, @@ -27642,7 +27614,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.352642, + "_score": 59.45992, "_version": null, "_explanation": null, "sort": null, @@ -27685,7 +27657,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.28247, + "_score": 55.378372, "_version": null, "_explanation": null, "sort": null, @@ -27730,7 +27702,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.28148, + "_score": 55.377567, "_version": null, "_explanation": null, "sort": null, @@ -27775,7 +27747,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -27816,7 +27788,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27857,7 +27829,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27898,7 +27870,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27939,7 +27911,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -27982,7 +27954,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -28023,7 +27995,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -28064,7 +28036,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -28105,7 +28077,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -28146,7 +28118,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -28189,7 +28161,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.367485, "_version": null, "_explanation": null, "sort": null, @@ -28230,7 +28202,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.301125, "_version": null, "_explanation": null, "sort": null, @@ -28271,7 +28243,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.301125, "_version": null, "_explanation": null, "sort": null, @@ -28312,7 +28284,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.301125, "_version": null, "_explanation": null, "sort": null, @@ -28353,7 +28325,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.301125, "_version": null, "_explanation": null, "sort": null, @@ -28405,7 +28377,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 203.14462, + "_score": 189.07884, "_version": null, "_explanation": null, "sort": null, @@ -28416,6 +28388,32 @@ }, "id": "6044151e28ccbc19ab2c60d8" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! 
To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 147.47621, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "image": "", "url": "https://docs.newrelic.com/attribute-dictionary/", @@ -28472,7 +28470,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 156.67883, + "_score": 147.26907, "_version": null, "_explanation": null, "sort": null, @@ -28482,32 +28480,6 @@ }, "id": "603f53b164441f41894e8875" }, - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.96875, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Android agent compatibility and requirements", @@ -28538,7 +28510,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 134.96759, + "_score": 127.50815, "_version": null, "_explanation": null, "sort": null, @@ -28572,7 +28544,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 129.5227, + "_score": 121.85154, "_version": null, "_explanation": null, "sort": null, @@ -28620,7 +28592,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30777, + "_score": 168.52728, "_version": null, "_explanation": null, "sort": null, @@ -28671,7 +28643,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.3023, + "_score": 168.52283, "_version": null, "_explanation": null, "sort": null, @@ -28714,7 +28686,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 146.85614, + "_score": 144.60301, "_version": null, "_explanation": null, "sort": null, @@ -28772,7 +28744,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.08545, + "_score": 142.95915, "_version": null, "_explanation": null, "sort": null, @@ -28826,7 +28798,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 144.11217, + "_score": 136.8765, "_version": null, "_explanation": null, "sort": null, @@ -28871,7 +28843,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -28912,7 +28884,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -28953,7 +28925,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -28994,7 +28966,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29035,7 +29007,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, 
"_explanation": null, "sort": null, @@ -29082,7 +29054,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30765, + "_score": 168.52571, "_version": null, "_explanation": null, "sort": null, @@ -29133,7 +29105,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30219, + "_score": 168.52126, "_version": null, "_explanation": null, "sort": null, @@ -29191,7 +29163,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.08542, + "_score": 142.95813, "_version": null, "_explanation": null, "sort": null, @@ -29245,7 +29217,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 144.11206, + "_score": 136.87433, "_version": null, "_explanation": null, "sort": null, @@ -29286,7 +29258,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 143.57901, + "_score": 136.43954, "_version": null, "_explanation": null, "sort": null, @@ -29331,7 +29303,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -29372,7 +29344,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29413,7 +29385,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29454,7 +29426,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29495,7 +29467,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29538,7 +29510,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -29579,7 +29551,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29620,7 +29592,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29661,7 +29633,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29702,7 +29674,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -29754,7 +29726,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.30186, + "_score": 86.50932, "_version": null, "_explanation": null, "sort": null, @@ -29789,7 +29761,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 91.16295, + "_score": 85.40664, 
"_version": null, "_explanation": null, "sort": null, @@ -29828,7 +29800,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.26317, + "_score": 76.73684, "_version": null, "_explanation": null, "sort": null, @@ -29840,6 +29812,46 @@ }, "id": "604418abe7b9d2d1015799cc" }, + { + "sections": [ + "Install Android apps with Gradle and Android Studio", + "Install your Android app", + "Configure with Gradle and Android Studio", + "Project level build.gradle file:", + "App level build.gradle file:", + "Important", + "Update your Android installation", + "Android 4.x: Multidex support", + "New Relic Gradle Plugin configuration" + ], + "title": "Install Android apps with Gradle and Android Studio", + "type": "docs", + "tags": [ + "Install configure", + "New Relic Mobile Android", + "Mobile monitoring" + ], + "external_id": "51fab3eba87ddee949cd4729de8b5f64534de9c7", + "image": "", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-android/install-configure/install-android-apps-gradle-android-studio/", + "published_at": "2022-02-14T11:59:24Z", + "updated_at": "2022-02-04T07:09:38Z", + "document_type": "page", + "popularity": 1, + "body": "If you use Gradle or Android Studio, follow these instructions to install New Relic's mobile monitoring for the first time. If you have previously installed the Android agent SDK for mobile monitoring, follow the steps before upgrading to the latest version with Gradle and Android Studio. Install your Android app As part of the installation process for mobile monitoring, New Relic automatically generates an application token. This is a 40-character hexadecimal string for authenticating each mobile app you monitor in New Relic. Go to one.newrelic.com > Mobile. If applicable: From the Mobile apps index, select Add a new app. From the Get started page, select Android as the platform for mobile monitoring. Type a meaningful name for your mobile app, and select Continue. Continue with the steps to configure mobile monitoring with Gradle and Android Studio. OR: To complete the configuration process for a new mobile app later: Go to one.newrelic.com > Mobile, then select See instructions next to your mobile app name. To upgrade an existing Android installation: Go to one.newrelic.com > Mobile > (select an app) > Settings > Installation. Configure with Gradle and Android Studio These procedures to configure your Android app with Gradle and Android Studio also appear on the Get started page in New Relic. Merge New Relic's mobile monitoring code in the Gradle & Android Studio tab to your build.gradle file. Project level build.gradle file: In this example, AGENT_VERSION represents your agent version number. See the agent release notes, and use the latest version. buildscript { repositories { mavenCentral() } dependencies { classpath \"com.newrelic.agent.android:agent-gradle-plugin:AGENT_VERSION\" } } Copy App level build.gradle file: In this example, AGENT_VERSION represents your agent version number. See the agent release notes, and use the latest version. 
repositories { mavenCentral() } apply plugin: 'android' apply plugin: 'newrelic' dependencies { implementation 'com.newrelic.agent.android:android-agent:AGENT_VERSION' } Copy ProGuard or DexGuard: In your project’s root directory (projectname/app), add a newrelic.properties file with the following line: com.newrelic.application_token=GENERATED_TOKEN Copy Follow the additional, required configuration steps for using ProGuard or DexGuard with New Relic. Set app permissions: Ensure that your Android app requests INTERNET and ACCESS_NETWORK_STATE permissions by adding these lines to your AndroidManifest.xml file: <uses-permission android:name=\"android.permission.INTERNET\" /> <uses-permission android:name=\"android.permission.ACCESS_NETWORK_STATE\" /> Copy To start New Relic's mobile monitoring agent: In your Default Activity (as defined in your Manifest), import the NewRelic class: import com.newrelic.agent.android.NewRelic; Copy Important We do not support starting the mobile monitoring agent in other classes, as that can cause unexpected or unstable behavior. To initialize mobile monitoring: In the onCreate() method, add this call: NewRelic.withApplicationToken(\"GENERATED_TOKEN\").start(this.getApplication()); Copy Optional: Change the logging level. To build and run your application: Clean your project, then run your app in an emulator or device to generate traffic. Wait a few minutes, then view data for your Android app from the Overview page: Go to one.newrelic.com > Mobile > (select an app). If you have problems with your Android installation, or if you do not see data in the mobile monitoring UI, follow the troubleshooting procedures. Update your Android installation To keep your Android agent up to date, follow the procedures to upgrade the Android agent SDK for New Relic. To install the latest Android version for your mobile app: Go to one.newrelic.com > Mobile > (select an app) > Settings > Installation. Android 4.x: Multidex support New Relic's mobile monitoring for Android versions prior to Android 5.0 (API level 21) uses the Dalvik runtime to execute app code. By default, Dalvik limits apps to a single classes.dex bytecode file per APK. In order to get around this limitation, you must enable multidex support. Then you can use the multidex support library, which becomes part of the primary DEX file of your app and then manages access to the additional DEX files and the code they contain. When building each DEX file for a multidex app, the build tools perform complex decision making to determine which classes are needed in the primary DEX file so that your app can start successfully. If any class required during startup is not provided in the primary DEX file, then your app crashes with the error java.lang.NoClassDefFoundError. If you see the java.lang.NoClassDefFoundError error, then you must manually specify these additional classes as required in the primary DEX file: Create a proguard.multidex.config file within the /app folder of your project. Update mypackage to reflect your package name. #################### # keep class names # #################### #Keep New Relic in the main dex -keep class com.newrelic.** { *; } -keep class com.mypackage.activities.** { *; } Copy Merge the following code into the app-level build.gradle file: android { defaultConfig{ … multiDexKeepProguard file(\"proguard.multidex.config\") } } Copy For more information, see the Android Developers documentation on declaring classes required in the primary DEX file.
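The ProGuard/DexGuard step quoted above reduces to generating a single file. A minimal shell sketch, assuming the Android app module lives at ./app and keeping the doc's GENERATED_TOKEN placeholder rather than a real application token:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Assumption: run from the project root; the app module directory is ./app.
# GENERATED_TOKEN is the placeholder from the quoted doc, not a real
# 40-character application token.
cat > app/newrelic.properties <<'EOF'
com.newrelic.application_token=GENERATED_TOKEN
EOF

echo "Wrote app/newrelic.properties"
```

In a real build you would substitute the application token generated for your app in one.newrelic.com before running ProGuard or DexGuard.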
New Relic Gradle Plugin configuration For details on how to configure the behavior of the mobile monitoring agent plugin during Gradle builds, see the New Relic Gradle plugin extension guide.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 62.383125, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "New Relic Mobile Android", + "body": "If you use Gradle or Android Studio, follow these instructions to install New Relic's mobile monitoring for the first time. If you have previously installed the Android agent SDK for mobile monitoring, follow the steps before upgrading to the latest version with Gradle and Android Studio. Install" + }, + "id": "603ea70128ccbc59c2eba74e" + }, { "image": "", "url": "https://docs.newrelic.com/attribute-dictionary/", @@ -29896,7 +29908,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.64693, + "_score": 62.33004, "_version": null, "_explanation": null, "sort": null, @@ -29905,50 +29917,6 @@ "body": ". Recommendation: Upgrade to the most recent mobile monitoring agent version to take full advantage of the new event types. Attribute name Definition Data types category The type of data, either session or interaction. MobileSession Mobile interactionDuration For interaction category events only" }, "id": "603f53b164441f41894e8875" - }, - { - "sections": [ - "Mobile monitoring alert information", - "Mobile alert conditions", - "Execution time", - "Errors", - "Crash reporting", - "Tip", - "Custom", - "View policies and conditions", - "View events and activities", - "View alert violations", - "Important" - ], - "title": "Mobile monitoring alert information", - "type": "docs", - "tags": [ - "Get started", - "New Relic Mobile", - "Mobile monitoring" - ], - "external_id": "93dd2fd0c629d0653bcb69533dd62162814a5ebd", - "image": "", - "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile/get-started/mobile-monitoring-alert-information/", - "published_at": "2022-02-15T12:26:47Z", - "updated_at": "2022-02-15T12:26:47Z", - "document_type": "page", - "popularity": 1, - "body": "Well-defined alerts help notify individuals or teams about changes in their systems. You can use any of our alerts and applied intelligence capabilities across all the entities you monitor with New Relic. For example, you can use alerts to manage alert policies and conditions that focus on the metrics for mobile apps you monitor with New Relic. After you define alert conditions for your mobile apps, you can view that information in the alerts UI or in other UI experiences, like the mobile monitoring UI. Mobile alert conditions Use any of the following conditions and thresholds when you set up alerts for your mobile apps: Execution time Execution time metrics include Database, Images, JSON, Network, and View loading. Define the thresholds for these metrics by selecting a time and duration. Errors Error metrics include Network error percentage and Status error percentage. Define the thresholds for these metrics by selecting a error percentage and duration. Crash reporting You may want to be notified when your app's crash rate goes above a certain threshold. Define the thresholds for these metrics by selecting an error percentage and duration. Tip Get notified about your exceeding app crash rate as soon as it happens by setting up notification channels, including PagerDuty and Slack. 
Custom Create your own custom metric to fit specific alert needs. Add a name for the customized metric, and define your own thresholds. View policies and conditions To view alert policy and condition information for a specific mobile app: Go to one.newrelic.com, click Mobile, and click Alert conditions. From the Alert conditions page, use the available tools to search, sort, view, or update the alert conditions and their associated policies. View events and activities To view summary information about events and other activity directly from the mobile monitoring UI: Go to one.newrelic.com and click Mobile. From the index, mouse over the entity's color-coded health status, select a link from the Mobile activity list, or select a mobile app to view additional details. For example, if a Critical alert violation occurs: The health status indicator turns red on the mobile index and on the selected app. The background color for various charts changes to pink. On your list of monitored mobile apps, the Application activity section shows Warning (yellow) and Critical (red) violations as applicable. To learn more about an alert violation, mouse over or select any pink area on a chart. View alert violations Important In July 2020, we transitioned alert violations for browser apps, mobile apps, and synthetic monitors to a new format in one.newrelic.com. For more information, see the Applied Intelligence and alerting docs and this Explorers Hub post. If an alert condition has thresholds set up for Warning (yellow) or Critical (red) violations, the color-coded health status for a product entity will change to indicate a violation. You can view the violations directly from the mobile app's page in New Relic: Go to one.newrelic.com and click Mobile. Select a mobile app, and then review its Open violations.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 64.175156, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Mobile monitoring alert information", - "sections": "Mobile monitoring alert information", - "tags": "New Relic Mobile", - "body": " on the metrics for mobile apps you monitor with New Relic. After you define alert conditions for your mobile apps, you can view that information in the alerts UI or in other UI experiences, like the mobile monitoring UI. 
Mobile alert conditions Use any of the following conditions and thresholds when you set up" - }, - "id": "6044144228ccbcd7422c608a" } ], "/newrelic-alert-cloudformation-resource/908e2847-ee50-4cf7-8e15-f426a1d5e4d0": [ @@ -29980,7 +29948,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 203.51042, + "_score": 188.8508, "_version": null, "_explanation": null, "sort": null, @@ -29990,43 +29958,6 @@ }, "id": "620c0440196a672b7b81be7d" }, - { - "sections": [ - "Introduction to AWS integrations", - "Connect AWS and New Relic", - "Integrations and AWS costs", - "View your AWS data", - "Region availability" - ], - "title": "Introduction to AWS integrations", - "type": "docs", - "tags": [ - "Get started", - "Amazon integrations", - "Integrations" - ], - "external_id": "7cf89c2eb75c934cc6fb30bcb7f5fb1f397326a6", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/get-started/introduction-aws-integrations/", - "published_at": "2022-02-15T17:15:27Z", - "updated_at": "2022-02-15T17:15:27Z", - "document_type": "page", - "popularity": 1, - "body": "Amazon integrations let you monitor your AWS data in several New Relic features. Enabling the AWS CloudWatch Metric Streams integration is the recommended solution to monitor all CloudWatch metrics from all AWS services (including custom namespaces). On top of this, additional integrations are available to get extended visibility on key AWS services beyond the available CloudWatch metrics. For a full reference of the supported metrics, please check the available CloudWatch metrics for each service in the AWS documentation pages. Connect AWS and New Relic In order to obtain AWS data, follow the procedure to connect AWS to New Relic. Additional API Polling integrations can be enabled on top of the AWS CloudWatch metric streams in order to pull data that's not available as CloudWatch metrics. The following integrations are not replaced by the metric streams: AWS Billing AWS CloudTrail AWS Health AWS Trusted Advisor AWS VPC AWS X-Ray Finally, other integrations may require additional configurations in your AWS account: AWS VPC Flow Logs AWS CloudFormation Integrations and AWS costs Keep in mind the following items: AWS CloudWatch metric streams pricing is defined based on the number of metric updates. For up-to-date pricing information check AWS CloudWatch Pricing. AWS Kinesis Data Firehose is used as the delivery method. For details, see the AWS Firehose pricing page. AWS Config can be optionally enabled in your AWS account, and used to enrich CloudWatch metrics with custom tags and resource metadata. With AWS Config, you are charged based on the number of configuration items recorded. See the AWS Config pricing page for details. If polling integrations are enabled (instead of metric streams), New Relic uses the Amazon CloudWatch API to obtain metrics from the AWS services you monitor. The number of calls to the CloudWatch API increases as you enable more integrations. Add AWS resources to those integrations, or scale those integrations across more regions. This can cause requests to the CloudWatch API to exceed the 1 million free limits granted by AWS and increase your CloudWatch bill. AWS offers enhanced monitoring for some of their services which allows for more metrics, more often. For example, see RDS enhanced monitoring costs. View your AWS data Once you follow the configuration process, data from your Amazon Web Services report directly to New Relic. 
AWS entities for most used services will be listed in the New Relic Explorer. Metrics and events will appear in the Data Explorer. AWS data will also be visible in the Infrastructure UI. To view your AWS data: Go to one.newrelic.com > Infrastructure > AWS. For any of the AWS integrations listed: For active streams, select the Explore your data link. OR For other integrations, browse the available dashboard or click on the Explore Data link. You can view and reuse NRQL queries both in the pre-configured dashboards and in the Events explorer dashboards. This allows you to tailor queries to your specific needs. Region availability Most AWS services offer regional endpoints to reduce data latency between cloud resources and applications. New Relic can obtain monitoring data from services and endpoints that are located in all AWS regions, except China.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 180.77469, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Introduction to AWS integrations", - "sections": "View your AWS data", - "body": "Amazon integrations let you monitor your AWS data in several New Relic features. Enabling the AWS CloudWatch Metric Streams integration is the recommended solution to monitor all CloudWatch metrics from all AWS services (including custom namespaces). On top of this, additional integrations" - }, - "id": "617dc3ed64441f8880fbe1c6" - }, { "sections": [ "Amazon CloudWatch Metric Streams integration", @@ -30072,7 +30003,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 178.96014, + "_score": 168.5786, "_version": null, "_explanation": null, "sort": null, @@ -30083,6 +30014,43 @@ }, "id": "617da828196a6740e2f7d130" }, + { + "sections": [ + "Introduction to AWS integrations", + "Connect AWS and New Relic", + "Integrations and AWS costs", + "View your AWS data", + "Region availability" + ], + "title": "Introduction to AWS integrations", + "type": "docs", + "tags": [ + "Get started", + "Amazon integrations", + "Integrations" + ], + "external_id": "7cf89c2eb75c934cc6fb30bcb7f5fb1f397326a6", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/get-started/introduction-aws-integrations/", + "published_at": "2022-02-15T17:15:27Z", + "updated_at": "2022-02-15T17:15:27Z", + "document_type": "page", + "popularity": 1, + "body": "Amazon integrations let you monitor your AWS data in several New Relic features. Enabling the AWS CloudWatch Metric Streams integration is the recommended solution to monitor all CloudWatch metrics from all AWS services (including custom namespaces). On top of this, additional integrations are available to get extended visibility on key AWS services beyond the available CloudWatch metrics. For a full reference of the supported metrics, please check the available CloudWatch metrics for each service in the AWS documentation pages. Connect AWS and New Relic In order to obtain AWS data, follow the procedure to connect AWS to New Relic. Additional API Polling integrations can be enabled on top of the AWS CloudWatch metric streams in order to pull data that's not available as CloudWatch metrics. 
The following integrations are not replaced by the metric streams: AWS Billing AWS CloudTrail AWS Health AWS Trusted Advisor AWS VPC AWS X-Ray Finally, other integrations may require additional configurations in your AWS account: AWS VPC Flow Logs AWS CloudFormation Integrations and AWS costs Keep in mind the following items: AWS CloudWatch metric streams pricing is defined based on the number of metric updates. For up-to-date pricing information check AWS CloudWatch Pricing. AWS Kinesis Data Firehose is used as the delivery method. For details, see the AWS Firehose pricing page. AWS Config can be optionally enabled in your AWS account, and used to enrich CloudWatch metrics with custom tags and resource metadata. With AWS Config, you are charged based on the number of configuration items recorded. See the AWS Config pricing page for details. If polling integrations are enabled (instead of metric streams), New Relic uses the Amazon CloudWatch API to obtain metrics from the AWS services you monitor. The number of calls to the CloudWatch API increases as you enable more integrations. Add AWS resources to those integrations, or scale those integrations across more regions. This can cause requests to the CloudWatch API to exceed the 1 million free limits granted by AWS and increase your CloudWatch bill. AWS offers enhanced monitoring for some of their services which allows for more metrics, more often. For example, see RDS enhanced monitoring costs. View your AWS data Once you follow the configuration process, data from your Amazon Web Services report directly to New Relic. AWS entities for most used services will be listed in the New Relic Explorer. Metrics and events will appear in the Data Explorer. AWS data will also be visible in the Infrastructure UI. To view your AWS data: Go to one.newrelic.com > Infrastructure > AWS. For any of the AWS integrations listed: For active streams, select the Explore your data link. OR For other integrations, browse the available dashboard or click on the Explore Data link. You can view and reuse NRQL queries both in the pre-configured dashboards and in the Events explorer dashboards. This allows you to tailor queries to your specific needs. Region availability Most AWS services offer regional endpoints to reduce data latency between cloud resources and applications. New Relic can obtain monitoring data from services and endpoints that are located in all AWS regions, except China.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 167.9933, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Introduction to AWS integrations", + "sections": "View your AWS data", + "body": "Amazon integrations let you monitor your AWS data in several New Relic features. Enabling the AWS CloudWatch Metric Streams integration is the recommended solution to monitor all CloudWatch metrics from all AWS services (including custom namespaces). 
On top of this, additional integrations" + }, + "id": "617dc3ed64441f8880fbe1c6" + }, { "image": "", "url": "https://developer.newrelic.com/automate-workflows/", @@ -30090,24 +30058,24 @@ "Automate workflows", "Guides to automate workflows", "Quickly tag resources", - "Set up New Relic using the Kubernetes operator", "Automate common tasks", + "Set up New Relic using the Kubernetes operator", "Automatically tag a simple \"Hello World\" Demo across the entire stack", "Getting started with New Relic and Terraform", "Set up New Relic using Helm charts" ], - "published_at": "2022-02-15T01:37:23Z", + "published_at": "2022-02-16T01:38:14Z", "title": "Automate workflows", - "updated_at": "2022-02-15T01:37:23Z", + "updated_at": "2022-02-16T01:38:14Z", "type": "developer", "external_id": "d4f408f077ed950dc359ad44829e9cfbd2ca4871", "document_type": "page", "popularity": 1, - "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", + "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 
30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.04663, + "_score": 157.73964, "_version": null, "_explanation": null, "sort": null, @@ -30147,7 +30115,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 135.12305, + "_score": 127.346054, "_version": null, "_explanation": null, "sort": null, @@ -30201,7 +30169,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 309.57907, + "_score": 287.7202, "_version": null, "_explanation": null, "sort": null, @@ -30246,7 +30214,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 246.40292, + "_score": 241.39114, "_version": null, "_explanation": null, "sort": null, @@ -30257,54 +30225,6 @@ }, "id": "61fd193d196a672daae826d6" }, - { - "sections": [ - "Link your applications to Kubernetes", - "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" - ], - "title": "Link your applications to Kubernetes", - "type": "docs", - "tags": [ - "Link apps and services", - "Kubernetes integration", - "Integrations" - ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", - "document_type": "page", - "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. 
To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that inject the metadata is enabled. 
Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled= true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled= true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation for certificates just modify nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled= true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl x509 -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair are in another format, see the Digicert knowledgebase for more help. 
Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to re-run the job using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used in order to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step.
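Those validation steps can be scripted end to end; a sketch composed only from the commands quoted above (the pod name busybox0 comes from the doc's own example, and kubectl wait is added here as an assumption to avoid racing the pod startup):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Deploy the dummy Busybox pod from the doc's example manifest.
kubectl create -f https://git.io/vPieo

# Wait for the pod to be ready before inspecting its environment.
kubectl wait --for=condition=Ready pod/busybox0 --timeout=60s

# The webhook should have injected the NEW_RELIC_METADATA_KUBERNETES_* variables.
kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES \
  || echo "metadata was not injected; see the troubleshooting steps below"

# Clean up the dummy pod.
kubectl delete -f https://git.io/vPieo
```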
If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries on the logs, it means that the apiserver is not being able to communicate with the webhook service, this could be due to networking rules or security groups rejecting the communication. To check if the apiserver is not being able to communicate with the webhook you should inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by the executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster, this will make the apiserver try to communicate with the webhook. The following command will create a busybox. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors. 
grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 239.92471, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Link your applications to Kubernetes", - "sections": "Link your applications to Kubernetes", - "tags": "Kubernetes integration", - "body": " the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips" - }, - "id": "617daead28ccbc662b7ffe23" - }, { "sections": [ "Kubernetes Data Ingest Analysis", @@ -30331,7 +30251,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 235.1036, + "_score": 235.28261, "_version": null, "_explanation": null, "sort": null, @@ -30386,7 +30306,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 233.32883, + "_score": 228.60898, "_version": null, "_explanation": null, "sort": null, @@ -30397,6 +30317,47 @@ "body": " metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x, The new version 1.3.2 fixes this issue, therefore it is enough to run again" }, "id": "61fd3c9d196a675ff3e80980" + }, + { + "sections": [ + "Not seeing control plane data", + "Problem", + "Solution", + "Check that the master nodes have the correct labels", + "Check that the integration is running on the master nodes", + "Check that the control plane components have the required labels", + "Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs", + "Manually query the metrics of the components" + ], + "title": "Not seeing control plane data", + "type": "docs", + "tags": [ + "Troubleshooting", + "Kubernetes integration", + "Integrations" + ], + "external_id": 
"02edaca82526773fcaef4adb97825d2349a404b4", + "image": "", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/troubleshooting/not-seeing-control-plane-data/", + "published_at": "2022-02-15T20:58:20Z", + "updated_at": "2021-10-24T03:07:45Z", + "document_type": "troubleshooting_doc", + "popularity": 1, + "body": "Problem You have completed the installation procedure for New Relic's Kubernetes integration, you are seeing Kubernetes data in your New Relic account but there is no data from any of the control plane components. Solution Check that the master nodes have the correct labels Execute the following commands to manually find the master nodes: kubectl get nodes -l node-role.kubernetes.io/master=\"\" Copy kubectl get nodes -l kubernetes.io/role=\"master\" Copy If the master nodes follow the labeling convention defined in the discovery of master nodes and control plane components documentation section, you should get some output like: NAME STATUS ROLES AGE VERSION ip-10-42-24-4.ec2.internal Ready master 42d v1.14.8 Copy If no nodes are found, there are two scenarios: Your master nodes don’t have the required labels that identify them as masters, in this case you need to add both labels to your master nodes. You’re in a managed cluster and your provider is handling the master nodes for you. In this case there is nothing you can do, since your provider is limiting the access to those nodes. Check that the integration is running on the master nodes Replace the placeholder in the following command with one of the node names returned in the previous step to get an integration pod running on a master node: kubectl get pods --field-selector spec.nodeName=NODE_NAME -l name=newrelic-infra --all-namespaces Copy The next command is the same, just that it selects the node for you: kubectl get pods --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra --all-namespaces Copy If everything is correct you should get some output like: NAME READY STATUS RESTARTS AGE newrelic-infra-whvzt 1/1 Running 0 6d20h Copy If the integration is not running on your master nodes, check that the daemonset has all the desired instances running and ready. kubectl get daemonsets -l app=newrelic-infra --all-namespaces Copy Check that the control plane components have the required labels Refer to the discovery of master nodes and control plane components documentation section and look for the labels the integration uses to discover the components. Then run the following commands to see if there are any pods with such labels and the nodes where they are running: kubectl get pods -l k8s-app=kube-apiserver --all-namespaces Copy If there is component with the given label you should see something like: NAMESPACE NAME READY STATUS RESTARTS AGE kube-system kube-apiserver-ip-10-42-24-42.ec2.internal 1/1 Running 3 49d Copy The same should be done with the rest of the components: kubectl get pods -l k8s-app=etcd-manager-main --all-namespaces Copy kubectl get pods -l k8s-app=kube-scheduler --all-namespaces Copy kubectl get pods -l k8s-app=kube-kube-controller-manager --all-namespaces Copy Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components jobs To retrieve the logs, follow the instructions on get logs from pod running on a master node. The integration logs for every component the following message “Running job: COMPONENT_NAME”. 
Ex: Running job: scheduler Copy Running job: etcd Copy Running job: controller-manager Copy Running job: api-server Copy If you didn’t specify the ETCD_TLS_SECRET_NAME configuration option you’ll find the following message in the logs: Skipping job creation for component etcd: etcd requires TLS configuration, none given Copy If any error occurs while querying the metrics of any component it will be logged after the Running job message. Manually query the metrics of the components Refer to the discovery of master nodes and control plane components documentation section to get the endpoint of the control plane component you want to query. With the endpoint we can use the integration pod that’s running on the same node as the component to query. The following are examples on how to query the Kubernetes scheduler: kubectl exec -ti POD_NAME -- wget -O - localhost:10251/metrics Copy The following command does the same, but also chooses the pod for you: kubectl exec -ti $(kubectl get pods --all-namespaces --field-selector spec.nodeName=$(kubectl get nodes -l node-role.kubernetes.io/master=\"\" -o jsonpath=\"{.items[0].metadata.name}\") -l name=newrelic-infra -o jsonpath=\"{.items[0].metadata.name}\") -- wget -O - localhost:10251/metrics Copy If everything is correct you should get some metrics on the Prometheus format, something like: Connecting to localhost:10251 (127.0.0.1:10251) # HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend. # TYPE apiserver_audit_event_total counter apiserver_audit_event_total 0 # HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend. # TYPE apiserver_audit_requests_rejected_total counter apiserver_audit_requests_rejected_total 0 # HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request. 
# TYPE apiserver_client_certificate_expiration_seconds histogram apiserver_client_certificate_expiration_seconds_bucket{le=\"0\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"1800\"} 0 apiserver_client_certificate_expiration_seconds_bucket{le=\"3600\"} 0 Copy", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 222.34695, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Not seeing control plane data", + "sections": "Not seeing control plane data", + "tags": "Kubernetes integration", + "body": "-manager-main --all-namespaces Copy kubectl get pods -l k8s-app=kube-scheduler --all-namespaces Copy kubectl get pods -l k8s-app=kube-kube-controller-manager --all-namespaces Copy Retrieve the verbose logs of one of the integrations running on a master node and check for the control plane components" + }, + "id": "617daf22196a67f585f7e101" } ], "/google-cloud-sql/139a99a9-d9b8-4b17-96b0-114c4434a023": [ @@ -30423,7 +30384,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 373.58154, + "_score": 351.044, "_version": null, "_explanation": null, "sort": null, @@ -30466,7 +30427,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 358.31213, + "_score": 332.26825, "_version": null, "_explanation": null, "sort": null, @@ -30505,7 +30466,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 343.37973, + "_score": 318.34332, "_version": null, "_explanation": null, "sort": null, @@ -30550,7 +30511,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 337.73837, + "_score": 317.3634, "_version": null, "_explanation": null, "sort": null, @@ -30590,7 +30551,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 334.72916, + "_score": 314.4875, "_version": null, "_explanation": null, "sort": null, @@ -30627,7 +30588,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 373.58154, + "_score": 351.0442, "_version": null, "_explanation": null, "sort": null, @@ -30670,7 +30631,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 358.31213, + "_score": 332.2685, "_version": null, "_explanation": null, "sort": null, @@ -30709,7 +30670,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 343.37973, + "_score": 318.34357, "_version": null, "_explanation": null, "sort": null, @@ -30754,7 +30715,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 337.73837, + "_score": 317.3636, "_version": null, "_explanation": null, "sort": null, @@ -30794,7 +30755,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 334.72916, + "_score": 314.48767, "_version": null, "_explanation": null, "sort": null, @@ -30831,7 +30792,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 373.58154, + "_score": 351.044, "_version": null, "_explanation": null, "sort": null, @@ -30874,7 +30835,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 358.31213, + "_score": 332.26825, "_version": null, "_explanation": null, "sort": null, @@ -30913,7 
+30874,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 343.37973, + "_score": 318.34332, "_version": null, "_explanation": null, "sort": null, @@ -30958,7 +30919,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 337.73837, + "_score": 317.3634, "_version": null, "_explanation": null, "sort": null, @@ -30998,7 +30959,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 334.72916, + "_score": 314.4875, "_version": null, "_explanation": null, "sort": null, @@ -31044,7 +31005,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 270.62357, + "_score": 255.00923, "_version": null, "_explanation": null, "sort": null, @@ -31090,7 +31051,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 163.64397, + "_score": 152.59059, "_version": null, "_explanation": null, "sort": null, @@ -31148,7 +31109,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 87.59265, + "_score": 87.20079, "_version": null, "_explanation": null, "sort": null, @@ -31158,96 +31119,74 @@ "id": "617dacb6196a676aaef7dcf3" }, { + "image": "", + "url": "https://docs.newrelic.com/docs/release-notes/infrastructure-release-notes/cloud-integration-release-notes/bug-fixes-google-cloud-integrations/", "sections": [ - "Azure Redis Cache monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Inventory data", - "EOL NOTICE", - "azure/rediscache/cache", - "azure/rediscache/shard" + "Bug fixes for Google Cloud integrations", + "Bug fixes" ], - "title": "Azure Redis Cache monitoring integration", + "published_at": "2022-02-14T12:07:31Z", + "title": "Bug fixes for Google Cloud integrations", + "updated_at": "2021-03-16T16:57:40Z", "type": "docs", - "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" - ], - "external_id": "86c0cb0a5eed3ffdcc00c41175da97e07179b81f", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-redis-cache-monitoring-integration/", - "published_at": "2022-02-15T21:15:43Z", - "updated_at": "2022-02-15T21:15:43Z", - "document_type": "page", + "external_id": "f20f2e1bef25ed423db48fc91f1b0e1a2590fe16", + "document_type": "release_notes", "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure Redis Cache that reports data to New Relic. This document explains how to activate this integration and describes the data that can be captured. Features New Relic's integration for Azure Redis Cache reports metrics about your Azure Redis Cache service, such as the number of connected clients, the number of GET and SET commands used, and write/read bytes per second. It also collects data about the status and configuration of the service. You can monitor and alert on your Azure Redis Cache data from New Relic Infrastructure UI, and you can create custom queries and chart dashboards. Activate integration To enable this integration, follow standard procedures to activate your Azure service in New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. 
Default polling information for the Azure Functions integration: Default polling interval: 5 minutes Resolution: 1 data point per minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > Azure and select the Azure Redis Cache integration. You can query and explore your data using the AzureRedisCacheSample event type and, when shards are configured in the service, the AzureRedisCacheShardSample event type. For more on how to find and use integration data, see Understand and use data. Metric data To view metrics reported by the Azure Redis Cache integration, query the AzureRedisCacheSample or AzureRedisCacheShardSample event type. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. For more details, see the Azure Redis Cache documentation. Metric Description Metadata cacheHits Number of successful key lookups. shardId cacheMisses Number of failed key lookups. shardId cacheReadBytesPerSecond Amount of data read from the cache in megabytes per second. shardId cacheWriteBytesPerSecond Amount of data written to the cache in megabytes per second. shardId connectedClients Number of client connections to the cache. shardId evictedKeys Number of items evicted from the cache due to the maxmemory limit. shardId expiredKeys Number of items expired from the cache. shardId getCommands Number of get operations from the cache. shardId operationsPerSecond Number of commands processed per second by the cache server. shardId processorTimePercent The CPU utilization of the Azure Redis Cache server as a percentage. shardId serverLoadPercent Percentage of cycles in which the Redis server is busy processing and not waiting idle for messages. shardId setCommands Number of set operations to the cache. shardId totalCommandsProcessed Total number of commands processed by the cache. shardId totalKeys Maximum number of keys in the cache for a given time period. shardId usedMemoryBytes Amount of cache memory used for key/value pairs in the cache, in megabytes. shardId usedMemoryRssBytes Amount of cache memory used, including fragmentation and metadata, in megabytes. shardId Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. This integration reports the following data: azure/rediscache/cache isPremium provisioningState hostName port sslPort redisVersion nonSslPort shardCount subnetId staticIP configuration skuName skuFamily skuCapacity id name type regionName tags azure/rediscache/shard Only some premium Redis Cache accounts will generate shard data. If that data is present, shardName will be reported in addition to the standard /cache inventory data.", + "body": "Bug fixes Some Google Kubernetes Engine entities had duplicate identifiers. In order to fix this issue, New Relic has regenerated the internal entityId and externalKey attributes for all containers in Insights events and Infrastructure inventory for this cloud integration. This change doesn't have any impact either on the integration default dashboard provided by New Relic Infrastructure or on the Insights queries. However, all entities related to Google Kubernetes Engine will be created again in Inventory. 
Accordingly, you might see duplicated entities for 48 hours, until the old identifiers expire. This might also cause an unexpected volume of Entity created events. Metadata for Google Compute Engine virtual machines was not being added to the metric events reported by the New Relic Infrastructure agent. In order to fix this issue, New Relic has regenerated the internal externalKey attribute for all virtual machines and disks in Insights events and Infrastructure inventory for this cloud integration. This change doesn't have any impact either on the integration default dashboard provided by New Relic Infrastructure or on the Insights queries. However, all entities related to Google Compute Engine will be created again in Inventory. Accordingly you might see duplicated entities for 48 hours, until the old identifiers expire. This might also cause an unexpected volume of Entity created events. For Google Cloud integrations, the value of the zone attribute was not reported consistently. As a consequence, some inventory attributes and event metadata, such as project, were not reported for some Google Cloud Storage buckets.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.06964, + "_score": 77.90752, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Azure Redis Cache monitoring integration", - "sections": "Azure Redis Cache monitoring integration", - "tags": "Azure integrations list", - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure Redis Cache that reports data to New Relic. This document explains how to activate this integration and describes the data that can be captured. Features New Relic's integration for Azure Redis Cache reports metrics" + "body": " expire. This might also cause an unexpected volume of Entity created events. Metadata for Google Compute Engine virtual machines was not being added to the metric events reported by the New Relic Infrastructure agent. 
In order to fix this issue, New Relic has regenerated the internal externalKey" }, - "id": "617da9a1e7b9d2841ec05b36" + "id": "603e8d90196a673bf1a83dab" }, { "sections": [ - "Azure Service Bus monitoring integration", + "Azure Redis Cache monitoring integration", "Features", "Activate integration", "Configuration and polling", "Find and use data", "Metric data", - "Namespace sample metrics", - "Queue sample metrics", - "Subscription sample metrics", - "Topic sample metrics", "Inventory data", "EOL NOTICE", - "Namespace inventory data", - "Queue inventory data", - "Subscription inventory data", - "Topic inventory data" + "azure/rediscache/cache", + "azure/rediscache/shard" ], - "title": "Azure Service Bus monitoring integration", + "title": "Azure Redis Cache monitoring integration", "type": "docs", "tags": [ "Azure integrations list", "Microsoft Azure integrations", "Integrations" ], - "external_id": "9d7f13f6d4df865f6c5a8b868c00407963e6e661", + "external_id": "86c0cb0a5eed3ffdcc00c41175da97e07179b81f", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-service-bus-monitoring-integration/", - "published_at": "2022-02-15T19:41:34Z", - "updated_at": "2022-02-15T19:41:34Z", + "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-redis-cache-monitoring-integration/", + "published_at": "2022-02-15T21:15:43Z", + "updated_at": "2022-02-15T21:15:43Z", "document_type": "page", "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure Service Bus that reports data from your Azure Service Bus service to New Relic. This document explains how to activate this integration and describes the data that can be captured. Features New Relic's integration for Azure Service Bus reports metric data about your Service Bus service, like the number of messages in the queue, the number of dead letter messages, and the byte size of the queue. It also collects data about the status and configuration of the service. You can monitor and alert on your Azure Service Bus data from New Relic, and you can create custom queries and chart dashboards. Activate integration To enable this integration follow standard procedures to activate your Azure service in New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Service Bus integration: Polling interval: 5 minutes Resolution: 1 data point per minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > Azure and look for the integration. You can query and explore your data using these event types: Namespace data: AzureServiceBusNamespaceSample Queue data: AzureServiceBusQueueSample Subscription data: AzureServiceBusSubscriptionSample Topic data: AzureServiceBusTopicSample For more on how to find and use integration data, see Understand and use data. Metric data There are several sets of metrics reported by the Azure Service Bus integration. For more about how data is structured and reported to New Relic, see Understand and use integration data. Namespace sample metrics Namespace metrics are only collected if you have the Service Bus Messaging Premium tier. Metric Description successfulRequests Total successful requests for a namespace. serverErrors Server errors for Microsoft.ServiceBus. userErrors User errors for Microsoft.ServiceBus. 
throttledRequests Throttled requests for Microsoft.ServiceBus. incomingRequests Incoming requests for Microsoft.ServiceBus. incomingMessages Incoming messages for Microsoft.ServiceBus. outgoingMessages Outgoing messages for Microsoft.ServiceBus. activeConnections Total active connections for Microsoft.ServiceBus. connectionsOpened Count of open connections for Microsoft.ServiceBus. connectionsClosed Count of closed connections for Microsoft.ServiceBus. sizeBytes Size of a queue/topic in Bytes. messages Count of messages in a Queue/Topic. activeMessages Count of active messages in a Queue/Topic. deadletteredMessages The total number of messages that are dead lettered. scheduledMessages The total number of scheduled messages. CPUXNS Service Bus premium namespace CPU usage metric indicating maximum percentage used. WSXNS Service Bus premium namespace memory usage metric indicating maximum percentage used. namespaceMemoryUsage Service bus premium namespace memory usage percentage. Queue sample metrics Metric Description successfulRequests Total successful requests for a namespace. serverErrors Server errors for Microsoft.ServiceBus. userErrors User errors for Microsoft.ServiceBus. throttledRequests Throttled requests for Microsoft.ServiceBus. incomingRequests Incoming requests for Microsoft.ServiceBus. incomingMessages Incoming messages for Microsoft.ServiceBus. outgoingMessages Outgoing messages for Microsoft.ServiceBus. connectionsOpened Count of open connections for Microsoft.ServiceBus. connectionsClosed Count of closed connections for Microsoft.ServiceBus. sizeBytes Size of a queue in bytes. messages Count of messages in a queue/topic. activeMessages Count of active messages in a queue/topic. deadletteredMessages The total number of messages that are dead lettered. scheduledMessages The total number of scheduled messages. Subscription sample metrics Metric Description activeMessages Total number of active messages in the queue, topic, or subscription. deadLetterMessages The total number of messages that are dead lettered. messages The total number of messages in the queue. scheduledMessages The total number of scheduled messages. transferDeadLetterMessages The total number of messages transferred into dead letters. transferMessages The total number of messages transferred to another queue, topic, or subscription. Topic sample metrics Metric Description successfulRequests Total successful requests for a namespace. serverErrors Server errors for Microsoft.ServiceBus. userErrors User errors for Microsoft.ServiceBus. throttledRequests Throttled requests for Microsoft.ServiceBus. incomingRequests Incoming requests for Microsoft.ServiceBus. incomingMessages Incoming messages for Microsoft.ServiceBus. outgoingMessages Outgoing messages for Microsoft.ServiceBus. connectionsOpened Count of open connections for Microsoft.ServiceBus. connectionsClosed Count of closed connections for Microsoft.ServiceBus. sizeBytes Size of a topic in bytes. messages Count of messages in a queue/topic. activeMessages Count of active messages in a queue/topic. deadletteredMessages The total number of messages that are dead lettered. scheduledMessages The total number of scheduled messages. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. 
For details on how to find and use inventory data, see Understand and use data. The Azure Service Bus integration reports this inventory data: Namespace inventory data azure/servicebus/namespace inventory: dnsLabel fqdn resourceGroupName (deprecates ResourceGroup) skuCapacity skuName skuTier Queue inventory data azure/servicebus/queue inventory: batchedOperationsEnabled deadLetteringEnabledForExpiredMessages defaultMessageTtlDuration deleteOnIdleDurationInMinutes duplicateDetectionEnabled duplicateMessageDetectionHistoryDuration expressEnabled lockDurationInSeconds maxDeliveryCountBeforeDeadLetteringMessage partitioningEnabled sessionEnabled status Subscription inventory data azure/servicebus/subscription inventory: batchedOperationsEnabled deadLetteringEnabledForExpiredMessages deadLetteringEnabledForFilterEvaluationFailedMessages defaultMessageTtlDuration deleteOnIdleDurationInMinutes lockDurationInSeconds maxDeliveryCountBeforeDeadLetteringMessage resourceGroupName sessionEnabled status Topic inventory data azure/servicebus/topic inventory: batchedOperationsEnabled defaultMessageTtlDuration deleteOnIdleDurationInMinutes duplicateDetectionEnabled duplicateMessageDetectionHistoryDuration expressEnabled partitioningEnabled resourceGroupName status", + "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure Redis Cache that reports data to New Relic. This document explains how to activate this integration and describes the data that can be captured. Features New Relic's integration for Azure Redis Cache reports metrics about your Azure Redis Cache service, such as the number of connected clients, the number of GET and SET commands used, and write/read bytes per second. It also collects data about the status and configuration of the service. You can monitor and alert on your Azure Redis Cache data from New Relic Infrastructure UI, and you can create custom queries and chart dashboards. Activate integration To enable this integration, follow standard procedures to activate your Azure service in New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Azure Functions integration: Default polling interval: 5 minutes Resolution: 1 data point per minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > Azure and select the Azure Redis Cache integration. You can query and explore your data using the AzureRedisCacheSample event type and, when shards are configured in the service, the AzureRedisCacheShardSample event type. For more on how to find and use integration data, see Understand and use data. Metric data To view metrics reported by the Azure Redis Cache integration, query the AzureRedisCacheSample or AzureRedisCacheShardSample event type. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. For more details, see the Azure Redis Cache documentation. Metric Description Metadata cacheHits Number of successful key lookups. shardId cacheMisses Number of failed key lookups. shardId cacheReadBytesPerSecond Amount of data read from the cache in megabytes per second. shardId cacheWriteBytesPerSecond Amount of data written to the cache in megabytes per second. shardId connectedClients Number of client connections to the cache. shardId evictedKeys Number of items evicted from the cache due to the maxmemory limit. 
shardId expiredKeys Number of items expired from the cache. shardId getCommands Number of get operations from the cache. shardId operationsPerSecond Number of commands processed per second by the cache server. shardId processorTimePercent The CPU utilization of the Azure Redis Cache server as a percentage. shardId serverLoadPercent Percentage of cycles in which the Redis server is busy processing and not waiting idle for messages. shardId setCommands Number of set operations to the cache. shardId totalCommandsProcessed Total number of commands processed by the cache. shardId totalKeys Maximum number of keys in the cache for a given time period. shardId usedMemoryBytes Amount of cache memory used for key/value pairs in the cache, in megabytes. shardId usedMemoryRssBytes Amount of cache memory used, including fragmentation and metadata, in megabytes. shardId Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. This integration reports the following data: azure/rediscache/cache isPremium provisioningState hostName port sslPort redisVersion nonSslPort shardCount subnetId staticIP configuration skuName skuFamily skuCapacity id name type regionName tags azure/rediscache/shard Only some premium Redis Cache accounts will generate shard data. If that data is present, shardName will be reported in addition to the standard /cache inventory data.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.643425, + "_score": 76.61224, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Azure Service Bus monitoring integration", - "sections": "Azure Service Bus monitoring integration", + "title": "Azure Redis Cache monitoring integration", + "sections": "Azure Redis Cache monitoring integration", "tags": "Azure integrations list", - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure Service Bus that reports data from your Azure Service Bus service to New Relic. This document explains how to activate this integration and describes the data that can be captured. Features New Relic's integration" + "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure Redis Cache that reports data to New Relic. This document explains how to activate this integration and describes the data that can be captured. 
Features New Relic's integration for Azure Redis Cache reports metrics" }, - "id": "617d54a6e7b9d28fbdc06271" + "id": "617da9a1e7b9d2841ec05b36" } ], "/google-cloud-spanner/98957106-b7dc-43d7-99c7-f367a9879813": [ @@ -31274,7 +31213,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 373.58154, + "_score": 351.044, "_version": null, "_explanation": null, "sort": null, @@ -31317,7 +31256,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 358.31213, + "_score": 332.26825, "_version": null, "_explanation": null, "sort": null, @@ -31356,7 +31295,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 343.37973, + "_score": 318.34332, "_version": null, "_explanation": null, "sort": null, @@ -31401,7 +31340,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 337.73837, + "_score": 317.3634, "_version": null, "_explanation": null, "sort": null, @@ -31441,7 +31380,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 334.72916, + "_score": 314.4875, "_version": null, "_explanation": null, "sort": null, @@ -31457,207 +31396,208 @@ "/msmq/8a2e0510-8a26-4a61-bd41-512dff1e2f01": [ { "sections": [ - "Amazon SQS monitoring integration", - "Important", - "Features", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Inventory data", - "EOL NOTICE", - "Tip" + "RestSharp", + "What's included", + ".NET", + "Apdex Score", + "Memory Usage", + "Transaction Errors", + "RestSharp installation docs", + "What is RestSharp?", + "Get started!", + "More info" ], - "title": "Amazon SQS monitoring integration", - "type": "docs", + "title": "RestSharp", + "type": "quickstarts", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "apm", + "dotnet" ], - "external_id": "24611759fe9586140c3f3dbf7b397977c1b6d249", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/amazon-sqs-monitoring-integration/", - "published_at": "2022-02-15T09:49:25Z", - "updated_at": "2022-02-15T09:49:25Z", + "quick_start_name": "RestSharp", + "external_id": "8cef0b354cf8360c39901f03d075f8a175120920", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/restsharp/dashboards/dotnet.png", + "url": "https://developer.newrelic.com/instant-observability/restsharp/4967824f-1a29-417f-88f2-093d30a41fc2/", + "published_at": "2022-02-07T01:42:21Z", + "updated_at": "2021-10-30T01:40:22Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your Amazon SQS data to New Relic. This document explains how to activate the integration and describes the data reported. Features Amazon Simple Queue Service (Amazon SQS) provides hosted queues for storing messages in transit. New Relic's SQS integration reports queue-related data such as the count of messages delivered, delayed, and received, message size, and other metrics and configuration/inventory data. 
SQS data is available in pre-built dashboards and you can also create custom queries and charts in New Relic One. You can also create alert conditions to notify you of changes in SQS data. Activate integration To enable this integration follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon SQS integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select one of the SQS integration links. You can query and explore your data using the QueueSample event type, with a provider value of SqsQueue. For more on how to use your data, see Understand integration data. Metric data This integration collects the following Amazon SQS metrics. For full metric descriptions, see Amazon's SQS documentation. Name Description approximateAgeOfOldestMessage The approximate age of the oldest non-deleted message in the queue, in seconds. For dead letter queues, this value is the longest time that a message has been in the queue. approximateNumberOfMessagesDelayed The approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter. approximateNumberOfMessagesNotVisible The approximate number of messages that are \"in flight.\" Messages are considered in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window. approximateNumberOfMessagesVisible The approximate number of messages available for retrieval from the queue. numberOfEmptyReceives The number of ReceiveMessage API calls that did not return a message. numberOfMessagesDeleted The number of messages deleted from the queue. numberOfMessagesReceived The number of messages returned by calls to the ReceiveMessage API action. numberOfMessagesSent The number of messages added to a queue. sentMessagesSize The size in bytes of messages added to a queue. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. This integration reports the following SQS configuration options as inventory data. For more about inventory data, see Understand integration data. Tip Data indicated with an asterisk * are only fetched if extended inventory collection is on. Name Description awsRegion The AWS region for the queue. delaySeconds* The number of seconds that a message placed into the queue should remain invisible to consumers before becoming available for consumption. Valid values: 0 and 900 (15 minutes). maximumMessageSize* The maximum amount of bytes a message can contain before Amazon SQS rejects it. For more information, see SetQueueAttributes. messageRetentionPeriod* The number of seconds Amazon SQS retains a message. For more information, see SetQueueAttributes. queue The name of the queue, which is a combination of the region, the AWS account ID and the customer specified queue name. 
For example, in https://sqs.us-east-1.amazonaws.com/01234567890/new-car-orders: us-east-1 is the region 01234567890 is the AWS account ID new-car-orders is the name of the queue queueArn* The Amazon Resource Name (ARN) for the queue. For example, in arn:aws:sqs:us-east-1:01234567890/new-car-orders the ARN is aws. queueUrl The URL for the queue. For example, https://sqs.us-east-1.amazonaws.com/01234567890/new-car-orders. receiveMessageWaitTimeSeconds* The time in seconds for which a ReceiveMessage call will wait for a message to arrive. Valid values: 0 to 20 visibilityTimeout* The visibility timeout for the queue, in seconds. Valid values: 0 to 43200 (12 hours). createdTimestamp* Returns the time when the queue was created in seconds. lastModifiedTimestamp* Returns the time when the queue was last changed in seconds. redrivePolicy* Returns the string that includes the parameters for dead letter queue functionality of the source queue.", + "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert RestSharp installation docs Popular REST API client library for .NET that features auto-serialization, request type detection, a variety of authentications, and more. Doc What is RestSharp? Popular REST API client library for .NET that features auto-serialization, request type detection, a variety of authentications, and more. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments RestSharp with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for RestSharp. RestSharp quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET RestSharp observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. RestSharp observability quickstart contains 1 data source . This is how you'll get your data into New Relic. RestSharp installation docs Popular REST API client library for .NET that features auto-serialization, request type detection, a variety of authentications, and more. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.29251, + "_score": 92.367485, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " SQS data to New Relic. This document explains how to activate the integration and describes the data reported. 
Features Amazon Simple Queue Service (Amazon SQS) provides hosted queues for storing messages in transit. New Relic's SQS integration reports queue-related data such as the count" + "tags": "apm", + "body": " dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for RestSharp. RestSharp quickstart contains 1 dashboard . These interactive" }, - "id": "617da50164441f41c4fbf2ea" + "id": "61566c0b28ccbc5878f2140b" }, { "sections": [ - "RestSharp", + "MonoRail", "What's included", ".NET", "Apdex Score", "Memory Usage", "Transaction Errors", - "RestSharp installation docs", - "What is RestSharp?", + "MonoRail installation docs", + "What is MonoRail?", "Get started!", "More info" ], - "title": "RestSharp", + "title": "MonoRail", "type": "quickstarts", "tags": [ "apm", "dotnet" ], - "quick_start_name": "RestSharp", - "external_id": "8cef0b354cf8360c39901f03d075f8a175120920", - "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/restsharp/dashboards/dotnet.png", - "url": "https://developer.newrelic.com/instant-observability/restsharp/4967824f-1a29-417f-88f2-093d30a41fc2/", + "quick_start_name": "MonoRail", + "external_id": "dd70321906d1d6adfb8dad4fd289b7a4f70c9deb", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/monorail/dashboards/dotnet.png", + "url": "https://developer.newrelic.com/instant-observability/monorail/3082c380-5cc1-48f7-976c-2b5157ecad92/", "published_at": "2022-02-07T01:42:21Z", - "updated_at": "2021-10-30T01:40:22Z", + "updated_at": "2021-10-29T01:52:31Z", "document_type": "page", "popularity": 1, - "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert RestSharp installation docs Popular REST API client library for .NET that features auto-serialization, request type detection, a variety of authentications, and more. Doc What is RestSharp? Popular REST API client library for .NET that features auto-serialization, request type detection, a variety of authentications, and more. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments RestSharp with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for RestSharp. RestSharp quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET RestSharp observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. 
Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. RestSharp observability quickstart contains 1 data source . This is how you'll get your data into New Relic. RestSharp installation docs Popular REST API client library for .NET that features auto-serialization, request type detection, a variety of authentications, and more. Docs", + "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert MonoRail installation docs Open source web application framework built for .NET designed to imitate some of the functions of Ruby on Rails. Doc What is MonoRail? Open source web application framework built for .NET designed to imitate some of the functions of Ruby on Rails. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments MonoRail with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for MonoRail. MonoRail quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET MonoRail observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. MonoRail observability quickstart contains 1 data source . This is how you'll get your data into New Relic. MonoRail installation docs Open source web application framework built for .NET designed to imitate some of the functions of Ruby on Rails. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.301125, "_version": null, "_explanation": null, "sort": null, "highlight": { "tags": "apm", - "body": " dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for RestSharp. RestSharp quickstart contains 1 dashboard . These interactive" + "body": " started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments MonoRail with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. 
Further leverage New Relic's APM" }, - "id": "61566c0b28ccbc5878f2140b" + "id": "6156699228ccbc057ef213f0" }, { "sections": [ - ".NET core", + "NancyFX", "What's included", ".NET", "Apdex Score", "Memory Usage", "Transaction Errors", - ".NET core installation docs", - "What is .NET core?", + "NancyFX installation docs", + "What is NancyFX?", "Get started!", "More info" ], - "title": ".NET core", + "title": "NancyFX", "type": "quickstarts", "tags": [ "apm", "dotnet" ], - "quick_start_name": ".NET core", - "external_id": "f62ee50c91be34a4a261f2f23f597e8360046f7d", - "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/dotnet-core/dashboards/dotnet.png", - "url": "https://developer.newrelic.com/instant-observability/dotnet-core/cbffd8f3-11db-40d7-a723-50e744485651/", - "published_at": "2022-02-07T01:40:48Z", - "updated_at": "2021-10-29T01:52:30Z", + "quick_start_name": "NancyFX", + "external_id": "fd6185eb8d1d0678c3572cfce85ba4b61d108e2c", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/nancyfx/dashboards/dotnet.png", + "url": "https://developer.newrelic.com/instant-observability/nancyfx/d676107b-9f7d-4770-bda0-440d94eeac50/", + "published_at": "2022-02-07T01:42:21Z", + "updated_at": "2021-10-29T01:52:31Z", "document_type": "page", "popularity": 1, - "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert .NET core installation docs The cross-platform successor to .NET Framework for building websites, services, and console apps. Doc What is .NET core? The cross-platform successor to .NET Framework for building websites, services, and console apps. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments .NET core with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for .NET core. .NET core quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET .NET core observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. .NET core observability quickstart contains 1 data source . This is how you'll get your data into New Relic. .NET core installation docs The cross-platform successor to .NET Framework for building websites, services, and console apps. 
Docs", + "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert NancyFX installation docs Nancy is a lightweight, low-ceremony, framework for building HTTP based services on .NET Framework/Core and Mono. Doc What is NancyFX? Nancy is a lightweight, low-ceremony, framework for building HTTP based services on .NET Framework/Core and Mono. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments NancyFX with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for NancyFX. NancyFX quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET NancyFX observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. NancyFX observability quickstart contains 1 data source . This is how you'll get your data into New Relic. NancyFX installation docs Nancy is a lightweight, low-ceremony, framework for building HTTP based services on .NET Framework/Core and Mono. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.301125, "_version": null, "_explanation": null, "sort": null, "highlight": { "tags": "apm", - "body": " expertise and instantly get value out of your telemetry data. This quickstart automatically instruments .NET core with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up" + "body": " started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments NancyFX with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. 
Further leverage New Relic's APM" }, - "id": "61566c0b196a673c3fb70d56" + "id": "61566c0b64441fae590995ff" }, { "sections": [ - "FubuMVC", + ".NET core", "What's included", ".NET", "Apdex Score", "Memory Usage", "Transaction Errors", - "FubuMVC installation docs", - "What is FubuMVC?", + ".NET core installation docs", + "What is .NET core?", "Get started!", "More info" ], - "title": "FubuMVC", + "title": ".NET core", "type": "quickstarts", "tags": [ "apm", "dotnet" ], - "quick_start_name": "FubuMVC", - "external_id": "f6bb7ca8dfa57e2c233d44c3b9b51b81fe294e79", - "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/fubumvc/dashboards/dotnet.png", - "url": "https://developer.newrelic.com/instant-observability/fubumvc/dbce65a2-5f9a-48cb-a957-966338a065d3/", - "published_at": "2022-02-07T01:42:21Z", + "quick_start_name": ".NET core", + "external_id": "f62ee50c91be34a4a261f2f23f597e8360046f7d", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/dotnet-core/dashboards/dotnet.png", + "url": "https://developer.newrelic.com/instant-observability/dotnet-core/cbffd8f3-11db-40d7-a723-50e744485651/", + "published_at": "2022-02-07T01:40:48Z", "updated_at": "2021-10-29T01:52:30Z", "document_type": "page", "popularity": 1, - "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert FubuMVC installation docs A .Net framework for enabling teams to efficiently build server-side systems. Doc What is FubuMVC? A .Net framework for enabling teams to efficiently build server-side systems. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments FubuMVC with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for FubuMVC. FubuMVC quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET FubuMVC observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. FubuMVC observability quickstart contains 1 data source . This is how you'll get your data into New Relic. FubuMVC installation docs A .Net framework for enabling teams to efficiently build server-side systems. 
Docs", + "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert .NET core installation docs The cross-platform successor to .NET Framework for building websites, services, and console apps. Doc What is .NET core? The cross-platform successor to .NET Framework for building websites, services, and console apps. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments .NET core with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for .NET core. .NET core quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET .NET core observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. .NET core observability quickstart contains 1 data source . This is how you'll get your data into New Relic. .NET core installation docs The cross-platform successor to .NET Framework for building websites, services, and console apps. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30112, "_version": null, "_explanation": null, "sort": null, "highlight": { "tags": "apm", - "body": " telemetry data. This quickstart automatically instruments FubuMVC with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing" + "body": " expertise and instantly get value out of your telemetry data. This quickstart automatically instruments .NET core with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. 
Further leverage New Relic's APM capabilities by setting up" }, - "id": "6156672e196a67559fb70d8d" + "id": "61566c0b196a673c3fb70d56" }, { "sections": [ - "MonoRail", + "FubuMVC", "What's included", ".NET", "Apdex Score", "Memory Usage", "Transaction Errors", - "MonoRail installation docs", - "What is MonoRail?", + "FubuMVC installation docs", + "What is FubuMVC?", "Get started!", "More info" ], - "title": "MonoRail", + "title": "FubuMVC", "type": "quickstarts", "tags": [ "apm", "dotnet" ], - "quick_start_name": "MonoRail", - "external_id": "dd70321906d1d6adfb8dad4fd289b7a4f70c9deb", - "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/monorail/dashboards/dotnet.png", - "url": "https://developer.newrelic.com/instant-observability/monorail/3082c380-5cc1-48f7-976c-2b5157ecad92/", + "quick_start_name": "FubuMVC", + "external_id": "f6bb7ca8dfa57e2c233d44c3b9b51b81fe294e79", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/dotnet/fubumvc/dashboards/dotnet.png", + "url": "https://developer.newrelic.com/instant-observability/fubumvc/dbce65a2-5f9a-48cb-a957-966338a065d3/", "published_at": "2022-02-07T01:42:21Z", - "updated_at": "2021-10-29T01:52:31Z", + "updated_at": "2021-10-29T01:52:30Z", "document_type": "page", "popularity": 1, - "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert MonoRail installation docs Open source web application framework built for .NET designed to imitate some of the functions of Ruby on Rails. Doc What is MonoRail? Open source web application framework built for .NET designed to imitate some of the functions of Ruby on Rails. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments MonoRail with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for MonoRail. MonoRail quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET MonoRail observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. MonoRail observability quickstart contains 1 data source . This is how you'll get your data into New Relic. MonoRail installation docs Open source web application framework built for .NET designed to imitate some of the functions of Ruby on Rails. 
Docs", + "body": "What's included .NET Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert FubuMVC installation docs A .Net framework for enabling teams to efficiently build server-side systems. Doc What is FubuMVC? A .Net framework for enabling teams to efficiently build server-side systems. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments FubuMVC with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for FubuMVC. FubuMVC quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. .NET .NET FubuMVC observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Transaction Errors Alert Type: STATIC This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. FubuMVC observability quickstart contains 1 data source . This is how you'll get your data into New Relic. FubuMVC installation docs A .Net framework for enabling teams to efficiently build server-side systems. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30112, "_version": null, "_explanation": null, "sort": null, "highlight": { "tags": "apm", - "body": " started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments MonoRail with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM" + "body": " telemetry data. This quickstart automatically instruments FubuMVC with the New Relic .Net agent, and allows you to instantly monitor your .Net application with out-of-the-box dashboards and alerts. 
Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing" }, - "id": "6156699228ccbc057ef213f0" + "id": "6156672e196a67559fb70d8d" } ], "/full-stack-observability/24da1e3a-035d-493c-a881-72c42b601027": [ @@ -31688,7 +31628,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1480.8654, + "_score": 1458.3213, "_version": null, "_explanation": null, "sort": null, @@ -31732,7 +31672,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1480.8318, + "_score": 1458.2908, "_version": null, "_explanation": null, "sort": null, @@ -31771,7 +31711,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1299.704, + "_score": 1301.3774, "_version": null, "_explanation": null, "sort": null, @@ -31825,7 +31765,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1004.7544, + "_score": 997.9115, "_version": null, "_explanation": null, "sort": null, @@ -31868,7 +31808,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 885.94196, + "_score": 885.35394, "_version": null, "_explanation": null, "sort": null, @@ -31910,7 +31850,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -31951,7 +31891,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -31992,7 +31932,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32033,7 +31973,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32074,7 +32014,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32117,7 +32057,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -32158,7 +32098,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32199,7 +32139,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32240,7 +32180,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32281,7 +32221,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32316,7 +32256,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65002, + "_score": 283.12827, "_version": 
null, "_explanation": null, "sort": null, @@ -32352,7 +32292,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86832, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -32398,7 +32338,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31323, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -32437,7 +32377,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.95203, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -32483,7 +32423,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.11005, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -32528,7 +32468,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -32569,7 +32509,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32610,7 +32550,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32651,7 +32591,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32692,7 +32632,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32735,7 +32675,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -32776,7 +32716,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32817,7 +32757,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32858,7 +32798,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32899,7 +32839,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -32942,7 +32882,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -32983,7 +32923,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -33024,7 +32964,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, 
"_version": null, "_explanation": null, "sort": null, @@ -33065,7 +33005,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -33106,7 +33046,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -33154,7 +33094,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 240.86488, + "_score": 223.28033, "_version": null, "_explanation": null, "sort": null, @@ -33206,7 +33146,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 226.33606, + "_score": 210.89719, "_version": null, "_explanation": null, "sort": null, @@ -33240,7 +33180,7 @@ "external_id": "509f5fb8490b652df4c6f31ec06b403c3393530e", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elb-classic-monitoring-integration/", - "published_at": "2022-02-14T11:39:18Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-14T11:39:18Z", "document_type": "page", "popularity": 1, @@ -33248,7 +33188,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 212.7544, + "_score": 200.11423, "_version": null, "_explanation": null, "sort": null, @@ -33296,7 +33236,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 201.80838, + "_score": 189.49722, "_version": null, "_explanation": null, "sort": null, @@ -33341,7 +33281,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 200.57974, + "_score": 189.1182, "_version": null, "_explanation": null, "sort": null, @@ -33385,7 +33325,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -33426,7 +33366,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -33467,7 +33407,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -33508,7 +33448,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -33549,7 +33489,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -33584,7 +33524,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65002, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -33625,7 +33565,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 292.01764, + "_score": 274.48984, "_version": null, "_explanation": null, "sort": null, @@ -33661,7 +33601,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86832, + "_score": 274.42047, 
"_version": null, "_explanation": null, "sort": null, @@ -33707,7 +33647,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31323, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -33746,7 +33686,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.95203, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -33790,7 +33730,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -33831,7 +33771,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -33872,7 +33812,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -33913,7 +33853,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -33954,7 +33894,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -33997,7 +33937,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -34038,7 +33978,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -34079,7 +34019,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -34120,7 +34060,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -34161,7 +34101,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -34205,7 +34145,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -34247,7 +34187,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -34289,7 +34229,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13807, + "_score": 116.470924, "_version": null, "_explanation": null, "sort": null, @@ -34331,7 +34271,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.052864, + "_score": 116.38686, "_version": null, "_explanation": null, "sort": null, @@ -34367,7 +34307,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.4525, + 
"_score": 114.805626, "_version": null, "_explanation": null, "sort": null, @@ -34409,7 +34349,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -34450,7 +34390,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -34491,7 +34431,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -34532,7 +34472,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -34573,7 +34513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -34616,7 +34556,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -34657,7 +34597,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -34698,7 +34638,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -34739,7 +34679,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -34780,7 +34720,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -34823,7 +34763,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -34864,7 +34804,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -34905,7 +34845,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -34946,7 +34886,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -34987,7 +34927,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -35030,7 +34970,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -35071,7 +35011,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, 
+ "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -35112,7 +35052,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -35153,7 +35093,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -35194,7 +35134,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -35238,7 +35178,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -35280,7 +35220,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -35322,7 +35262,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13807, + "_score": 116.470924, "_version": null, "_explanation": null, "sort": null, @@ -35364,7 +35304,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.052864, + "_score": 116.38686, "_version": null, "_explanation": null, "sort": null, @@ -35400,7 +35340,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.4525, + "_score": 114.805626, "_version": null, "_explanation": null, "sort": null, @@ -35448,7 +35388,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.161606, + "_score": 80.60554, "_version": null, "_explanation": null, "sort": null, @@ -35490,7 +35430,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.0705, + "_score": 68.374596, "_version": null, "_explanation": null, "sort": null, @@ -35547,7 +35487,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 62.040863, + "_score": 61.079285, "_version": null, "_explanation": null, "sort": null, @@ -35596,7 +35536,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.399574, + "_score": 56.211124, "_version": null, "_explanation": null, "sort": null, @@ -35631,7 +35571,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.397423, + "_score": 56.209385, "_version": null, "_explanation": null, "sort": null, @@ -35675,7 +35615,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -35716,7 +35656,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -35757,7 +35697,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -35798,7 +35738,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - 
"_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -35839,7 +35779,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -35882,7 +35822,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -35923,7 +35863,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -35964,7 +35904,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -36005,7 +35945,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -36046,7 +35986,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -36085,7 +36025,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 106.91437, + "_score": 106.742256, "_version": null, "_explanation": null, "sort": null, @@ -36127,7 +36067,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.70291, + "_score": 105.52081, "_version": null, "_explanation": null, "sort": null, @@ -36170,7 +36110,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 102.09158, + "_score": 101.89656, "_version": null, "_explanation": null, "sort": null, @@ -36212,7 +36152,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.79186, + "_score": 97.94975, "_version": null, "_explanation": null, "sort": null, @@ -36258,7 +36198,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.7918, + "_score": 97.94969, "_version": null, "_explanation": null, "sort": null, @@ -36303,7 +36243,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -36345,7 +36285,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -36387,7 +36327,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13807, + "_score": 116.470924, "_version": null, "_explanation": null, "sort": null, @@ -36429,7 +36369,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.052864, + "_score": 116.38686, "_version": null, "_explanation": null, "sort": null, @@ -36465,7 +36405,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.4525, + "_score": 114.805626, "_version": null, "_explanation": null, "sort": null, @@ -36508,7 +36448,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -36550,7 +36490,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -36592,7 +36532,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13807, + "_score": 116.470924, "_version": null, "_explanation": null, "sort": null, @@ -36634,7 +36574,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.052864, + "_score": 116.38686, "_version": null, "_explanation": null, "sort": null, @@ -36670,7 +36610,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.4525, + "_score": 114.805626, "_version": null, "_explanation": null, "sort": null, @@ -36712,7 +36652,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -36753,7 +36693,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -36794,7 +36734,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -36835,7 +36775,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -36876,7 +36816,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -36919,7 +36859,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -36960,7 +36900,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -37001,7 +36941,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -37042,7 +36982,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37083,7 +37023,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37126,7 +37066,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -37167,7 +37107,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -37208,7 +37148,7 @@ "info": "", "_index": 
"520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -37249,7 +37189,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37290,7 +37230,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37333,7 +37273,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -37374,7 +37314,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -37415,7 +37355,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -37456,7 +37396,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37497,7 +37437,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37533,7 +37473,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.14062, + "_score": 270.89844, "_version": null, "_explanation": null, "sort": null, @@ -37586,7 +37526,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 261.90628, + "_score": 247.6502, "_version": null, "_explanation": null, "sort": null, @@ -37624,7 +37564,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 255.75034, + "_score": 238.05206, "_version": null, "_explanation": null, "sort": null, @@ -37668,7 +37608,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 207.61093, + "_score": 195.96695, "_version": null, "_explanation": null, "sort": null, @@ -37708,7 +37648,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 185.30818, + "_score": 181.57454, "_version": null, "_explanation": null, "sort": null, @@ -37749,7 +37689,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -37790,7 +37730,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -37831,7 +37771,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -37872,7 +37812,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37913,7 +37853,7 @@ "info": "", 
"_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -37956,7 +37896,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -37997,7 +37937,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -38038,7 +37978,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -38079,7 +38019,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -38120,7 +38060,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -38163,7 +38103,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -38204,7 +38144,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -38245,7 +38185,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -38286,7 +38226,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -38327,7 +38267,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -38371,7 +38311,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -38413,7 +38353,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -38455,7 +38395,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13807, + "_score": 116.470924, "_version": null, "_explanation": null, "sort": null, @@ -38497,7 +38437,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.052864, + "_score": 116.38686, "_version": null, "_explanation": null, "sort": null, @@ -38533,7 +38473,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.4525, + "_score": 114.805626, "_version": null, "_explanation": null, "sort": null, @@ -38580,7 +38520,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30777, + "_score": 168.5256, "_version": null, "_explanation": null, "sort": null, @@ -38631,7 +38571,7 @@ 
"info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.3023, + "_score": 168.52115, "_version": null, "_explanation": null, "sort": null, @@ -38689,7 +38629,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.08545, + "_score": 142.9581, "_version": null, "_explanation": null, "sort": null, @@ -38743,7 +38683,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 144.11217, + "_score": 136.87427, "_version": null, "_explanation": null, "sort": null, @@ -38784,7 +38724,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 143.57912, + "_score": 136.43945, "_version": null, "_explanation": null, "sort": null, @@ -38824,7 +38764,7 @@ "external_id": "900cd7db78e550d26984051c5ba9da7f2238b0cc", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elasticsearch-monitoring-integration/", - "published_at": "2022-02-14T11:41:38Z", + "published_at": "2022-02-16T01:42:03Z", "updated_at": "2022-02-14T11:41:37Z", "document_type": "page", "popularity": 1, @@ -38832,7 +38772,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 130.57867, + "_score": 123.030624, "_version": null, "_explanation": null, "sort": null, @@ -38874,7 +38814,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 123.29282, + "_score": 115.955444, "_version": null, "_explanation": null, "sort": null, @@ -38916,7 +38856,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.231346, + "_score": 82.228, "_version": null, "_explanation": null, "sort": null, @@ -38956,7 +38896,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.40832, "_version": null, "_explanation": null, "sort": null, @@ -38997,7 +38937,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.40805, "_version": null, "_explanation": null, "sort": null, @@ -39046,7 +38986,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.682076, + "_score": 108.95038, "_version": null, "_explanation": null, "sort": null, @@ -39089,7 +39029,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 80.831955, + "_score": 80.94208, "_version": null, "_explanation": null, "sort": null, @@ -39131,7 +39071,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -39172,7 +39112,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -39213,7 +39153,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -39260,7 +39200,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 129.4728, + "_score": 128.03642, "_version": null, "_explanation": null, "sort": null, @@ -39299,7 +39239,7 @@ 
"info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 121.09124, + "_score": 120.536, "_version": null, "_explanation": null, "sort": null, @@ -39330,7 +39270,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.92081, + "_score": 118.37877, "_version": null, "_explanation": null, "sort": null, @@ -39373,7 +39313,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 117.15877, + "_score": 116.63826, "_version": null, "_explanation": null, "sort": null, @@ -39387,54 +39327,47 @@ }, { "sections": [ - "Install the Kubernetes integration using Helm", - "Compatibility and requirements", - "Install Kubernetes integration with Helm", - "Installing and configuring nri-bundle with Helm", - "Tip", - "Install with Helm 2 and nri-bundle (legacy)", - "Installation instructions for Helm 2", - "Important", - "Helm configuration options", - "Configure the integration", - "Change the scrape interval", - "Upgrade using Helm", - "Monitor services running on Kubernetes", - "Use your Kubernetes data", - "Reduce data ingest", - "New Relic Infrastructure", - "Prometheus OpenMetrics Integration", - "New Relic Logging", - "New Relic Pixie Integration", - "Uninstall Kubernetes integration" + "Set up your Prometheus remote write integration", + "Set up the integration", + "Map Prometheus and New Relic metric types", + "Override metric type mappings", + "Set allow or deny lists for sent metrics", + "Customize remote write behavior", + "X-License Key", + "prometheus_server URL parameter", + "Optimize throughput and memory consumption", + "Troubleshoot error messages", + "Remove the integration" ], - "title": "Install the Kubernetes integration using Helm", + "title": "Set up your Prometheus remote write integration", "type": "docs", "tags": [ - "Installation", - "Kubernetes integration", + "Install and configure remote write", + "Prometheus integrations", "Integrations" ], - "external_id": "29d42af98d41e7e4e7be86f0150254e132cb6b6a", + "external_id": "2b83e518967d4375d0530d239067a0c49c42ad3a", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/installation/install-kubernetes-integration-using-helm/", - "published_at": "2022-02-15T20:53:53Z", - "updated_at": "2022-02-15T20:53:53Z", + "url": "https://docs.newrelic.com/docs/infrastructure/prometheus-integrations/install-configure-remote-write/set-your-prometheus-remote-write-integration/", + "published_at": "2022-02-14T10:24:51Z", + "updated_at": "2022-01-22T08:48:16Z", "document_type": "page", "popularity": 1, - "body": "Helm is a package manager on top of Kubernetes. It facilitates installation, upgrades, or revision tracking, and it manages dependencies for the services that you install in Kubernetes. To install the integration using Helm, we recommend our Kubernetes automated installer, which will prompt for some configuration options and autopopulate secrets and values for you. Additionally, our automated installer also allows installing our integration as plain manifests rather than a Helm release. See Kubernetes integration: install and configure for more details about how to use our automated installer. Start the installer This page describes in more depth how to install and configure the New Relic integration without using the automated installer. Compatibility and requirements Make sure Helm is installed on your machine. 
Version 3 of the Kubernetes Integration requires Helm version 3. If you are still using Helm 2, you can still install the legacy version of the integration. To install the Kubernetes integration using Helm, you will need your New Relic account license key and your Kubernetes cluster's name: Find and copy your New Relic license key. Choose a display name for your cluster. For example, you could use the output of: bash Copy $ kubectl config current-context Note these values somewhere safe, as you will need them later during the installation process. Install Kubernetes integration with Helm New Relic has several charts for the different components which offer different features for the platform: newrelic-infrastructure-v3: Contains the main Kubernetes integration and the infrastructure agent. This is the core component for the New Relic Kubernetes experience, responsible for reporting most of the data that is surfaced in the Kubernetes Dashboard and the Kubernetes Cluster Explorer. newrelic-logging: Provides a DaemonSet with New Relic's Fluent Bit output plugin to easily forward your logs to New Relic. nri-kube-events: Collects and reports cluster events (such as kubectl get events) to New Relic. nri-prometheus: New Relic's Prometheus OpenMetrics Integration, automatically scrapes Prometheus endpoints present in the cluser and reports metrics to New Relic. nri-metadata-injection: Sets up a minimal MutatingAdmissionWebhook that injects a couple of environment variables in the containers. These contain metadata about the cluster and New Relic installation and will be later picked up by applications instrumented using APM, allowing to correlate APM and infrastructure data. nri-statsd: New Relic StatsD integration. For convenience, New Relic provides the nri-bundle chart, which pulls a selectable set of the charts mentioned above. nri-bundle can also install Kube State Metrics and Pixie for you if needed. While it is possible to install those charts separately, we strongly recommend using the nri-bundle chart for Kubernetes deployments, as it ensures that values across all the charts are consistent and provides full control over which components are installed, as well as the possibility to configure all of them as Helm dependencies. This is the same chart that is used and referenced by our automated installer. Installing and configuring nri-bundle with Helm Ensure you are using the appropriate context in the machine where you will run Helm and kubectl: You can check the available contexts with: bash Copy $ kubectl config get-contexts And switch to the desired context using: bash Copy $ kubectl config use-context _CONTEXT_NAME_ Add the New Relic Helm charts repo: bash Copy $ helm repo add newrelic https://helm-charts.newrelic.com Create a file named values-newrelic.yaml, which will be used to define your configuration: global: licenseKey: _YOUR_NEW_RELIC_LICENSE_KEY_ cluster: _K8S_CLUSTER_NAME_ prometheus: # Automatically scrape prometheus metrics for annotated services in the cluster # Collecting prometheus metrics for large clusters might impact data usage significantly enabled: true lowDataMode: true # Skip ingesting cluster-level metrics webhook: # Deploy our webhook to link APM and Kubernetes entities enabled: true kubeEvents: # Report Kubernetes events enabled: true logging: # Report logs for containers running in the cluster enabled: true ksm: # Deploy kube-state-metrics in the cluster. # Set this to true unless it is already deployed. 
enabled: true Copy Make sure everything is configured properly in the chart by running the following command. Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel \\ > --dry-run \\ > --debug Tip By specifying --devel, you will be installing the version 3 of our solution, currently in Beta and scheduled to be generally available during Spring 2022. We strongly encourage you to try it out as it includes significant improvements over the v2. See what's changed. Please notice and adjust the following flags: global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY: Must be set to a valid License Key for your account. global.cluster=K8S_CLUSTER_NAME: Is used to identify the cluster in the New Relic UI, so should be a descriptive value not used by any other Kubernetes cluster configured in your New Relic account. ksm.enabled=true: Setting this to true will automatically install Kube State Metrics (KSM) for you, which is required for our integration to run. You can set this to false if KSM is already present in your cluster, even if it is on a different namespace. prometheus.enabled=true: Will deploy our Prometheus OpenMetrics integration, which automatically collects data from Prometheus endpoints present in the cluster. webhook.enabled=true: Will install our minimal webhook, which adds environment variables that, in turn, allows linking applications instrumented with New Relic APM to Kubernetes. Our Kubernetes charts have a comprehensive set of flags and tunables that can be edited to better fit your particular needs. Please check the Configure the integration section below to see what can be changed. Install the Kubernetes integration by running the command without --debug and --dry-run: bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel Check that pods are being deployed and reach a stable state: bash Copy $ kubectl -n newrelic get pods -w You should see: One newrelic-nrk8s-ksm pod. One newrelic-nrk8s-kubelet pod for each node in your cluster. One newrelic-nrk8s-control-plane pod for each master node in your cluster, if any. One newrelic-kube-state-metrics pod, if you included KSM with our installation. One newrelic-nri-kube-events pod, if you enabled Kubernetes events reporting. One newrelic-nri-prometheus pod, if you enabled the Prometheus integration. One newrelic-newrelic-logging pod for each node in your cluster, if you enabled the Logging integration. Install with Helm 2 and nri-bundle (legacy) Installation instructions for Helm 2 Important The last version the nri-bundle chart supporting Helm 2 is 3.2.11. Please upgrade to Helm 3 to install the latest version. We will be removing support for Helm 2 installations soon. To install using Helm 2: Add the New Relic Helm charts repo: bash Copy $ helm repo add newrelic https://helm-charts.newrelic.com Create a namespace for newrelic: bash Copy $ kubectl create namespace newrelic Make sure everything is configured properly in the chart by running the following command. This step uses the --dry-run and --debug switches and therefore the agent is not installed. 
bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --version 3.2.11 \\ > --dry-run \\ > --debug \\ > --namespace newrelic \\ > --set global.licenseKey=_YOUR_NEW_RELIC_LICENSE_KEY_ \\ > --set global.cluster=_K8S_CLUSTER_NAME_ \\ > --set ksm.enabled=true \\ > --set newrelic-infrastructure.privileged=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true Install the New Relic Kubernetes integration by running the same command without --dry-run and --debug bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --version 3.2.11 \\ > --namespace newrelic \\ > --set global.licenseKey=_YOUR_NEW_RELIC_LICENSE_KEY_ \\ > --set global.cluster=_K8S_CLUSTER_NAME_ \\ > --set ksm.enabled=true \\ > --set newrelic-infrastructure.privileged=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true Wait a few seconds, then check that the DaemonSet and pods have been created: Check that pods are being deployed and reach a stable state: bash Copy $ kubectl -n newrelic get pods -w Make sure you see a DaemonSet, and one pod per node. Helm configuration options When you install or upgrade the Kubernetes integration with Helm using the command line, you can pass your configuration variables with the --set flag. bash Copy $ helm install newrelic/newrelic-infrastructure \\ > --set licenseKey=_YOUR_NEW_RELIC_LICENSE_KEY_ \\ > --set cluster=_YOUR_CLUSTER_NAME_ A full list of the configuration parameters can be found in the newrelic-infrastructure chart README Configure the integration Our nri-bundle chart. whose installation instructions can be found above, acts as a wrapper or a meta-package for a couple of other charts, which are the ones containing the components for our solution. By offering such a wrapper we can provide a controlled set of our components with versions that we know are compatible with each other, while keeping the component's charts relatively simple. To configure the individual integration components, you must use Helm's dependency system, which simply means that configuration for the children charts must be put under a section with the name of said chart. For example, to configure the newrelic-infrastructure chart, you would add the following to the values-newrelic.yaml: global: licenseKey: _YOUR_NEW_RELIC_LICENSE_KEY_ cluster: _K8S_CLUSTER_NAME_ # ... Other settings as shown above # Configuration for newrelic-infrastructure newrelic-infrastructure: verboseLog: true # Enable debug logs privileged: false # Install with minimal privileges # Other options from https://github.com/newrelic/helm-charts/tree/master/charts/newrelic-infrastructure-v3 Copy The full list of flags that can be tweaked can be found in our chart's repository: newrelic-infrastructure Configure debug logs, privilege mode, control plane monitoring, etc. nri-kube-events nri-metadata-injection Configure how the webhook for APM linkage is deployed. nri-prometheus Configure which Prometheus endpoints are scraped. newrelic-logging Configure which logs are sent to New Relic. Tip Remember that when specifying options for these charts, you must put them under the chart name in your values-newrelic.yaml. Change the scrape interval The Kubernetes Integration v3 and above allows changing the interval at which metrics are gathered from the cluster. This allows choosing a tradeoff between data resolution and usage. 
We recommend choosing an interval between 15 and 30 seconds for optimal experience. In order to change the scrape interval, add the following to your values-newrelic.yaml, under the newrelic-infratructure section: common: config: interval: 25s Copy So it ends up looking like: global: licenseKey: _YOUR_NEW_RELIC_LICENSE_KEY_ cluster: _K8S_CLUSTER_NAME_ # ... Other settings as shown above # Configuration for newrelic-infrastructure newrelic-infrastructure: # ... Other settings as shown above common: config: interval: 25s Copy Important Setting interval to values larger than 40s is not allowed. A full list of the settings that can be modified can be found at the chart's README. Upgrade using Helm To update your Kubernetes integration installed via Helm: Update the local chart repository: bash Copy $ helm repo update Update the release by running again the appropriate helm upgrade --install ... command in the section above bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel Monitor services running on Kubernetes After having installed our Kubernetes integration, you can start instrumenting the services that run in your cluster. To learn more about how to do this, please check our Monitor services running on Kubernetes page. Use your Kubernetes data To learn more about how to use your Kubernetes data, please head to our detailed Find and use your Kubernetes data pages. Reduce data ingest Our charts support setting an option to reduce the amount of data ingested at the cost of dropping detailed information. To enable it, set global.lowDataMode to true in the nri-bundle chart. lowDataMode affects three specific components of the nri-bundle chart outlined below. New Relic Infrastructure If lowDataMode is enabled, the default scrape interval changes from 15s to 30s. You can also specify a custom value for it using config.interval, which will take preference over lowDataMode. Prometheus OpenMetrics Integration If lowDataMode is enabled, the following metrics are excluded by default as they are already collected and used by the New Relic Kubernetes Integration. - kube_ - container_ - machine_ - cadvisor_ Copy New Relic Logging If lowDataMode is enabled, Labels and Annotations are set to Off in the Filter section of the fluent-bit.conf file. This means that this detail will be dropped from the container log files which reduces the overall data ingest into New Relic. 
The following fields are retained: Allowlist_key container_name Allowlist_key namespace_name Allowlist_key pod_name Allowlist_key stream Allowlist_key log Copy Low Data Mode Log Example Complete Log Record [ { \"cluster_name\": \"api-test\", \"kubernetes\": { \"annotations\": { \"kubernetes.io/psp\": \"eks.privileged\" }, \"container_hash\": \"fryckbos/test@sha256:5b098eaf3c7d5b3585eb10cebee63665b6208bea31ef31a3f0856c5ffdda644b\", \"container_image\": \"fryckbos/test:latest\", \"container_name\": \"newrelic-logging\", \"docker_id\": \"134e1daf63761baa15e035b08b7aea04518a0f0e50af4215131a50c6a379a072\", \"host\": \"ip-192-168-17-123.ec2.internal\", \"labels\": { \"app\": \"newrelic-logging\", \"app.kubernetes.io/name\": \"newrelic-logging\", \"controller-revision-hash\": \"84db95db86\", \"pod-template-generation\": \"1\", \"release\": \"nri-bundle\" }, \"namespace_name\": \"nrlogs\", \"pod_id\": \"54556e3e-719c-46b5-af69-020b75d69bf1\", \"pod_name\": \"nri-bundle-newrelic-logging-jxnbj\" }, \"message\": \"[2021/09/14 12:30:49] [ info] [engine] started (pid=1)\\n\", \"plugin\": { \"source\": \"kubernetes\", \"type\": \"fluent-bit\", \"version\": \"1.8.1\" }, \"stream\": \"stderr\", \"time\": \"2021-09-14T12:30:49.138824971Z\", \"timestamp\": 1631622649138 } ] Copy Log Record after enabling lowDataMode. [ { \"cluster_name\": \"api-test\", \"container_name\": \"newrelic-logging\", \"namespace_name\": \"nrlogs\", \"pod_name\": \"nri-bundle-newrelic-logging-jxnbj\", \"message\": \"[2021/09/14 12:30:49] [ info] [engine] started (pid=1)\\n\", \"stream\": \"stderr\", \"timestamp\": 1631622649138 } ] Copy New Relic Pixie Integration If lowDataMode is enabled, the newrelic-pixie integration performs heavier sampling on Pixie spans and reduces the collection interval from 10 seconds to 15 seconds. lowDataMode settings: HTTP_SPAN_LIMIT: 750 DB_SPAN_LIMIT: 250 COLLECT_INTERVAL_SEC: 15 Copy The default settings for these parameters and others can be found in the newrelic-pixie-integration Github repo. Uninstall Kubernetes integration To uninstall the Kubernetes integration using Helm, run the following command: bash Copy $ helm uninstall newrelic -n newrelic", + "body": "You can get Prometheus data flowing in New Relic with just a few simple steps. Once you integrate, your data will be visible in query-based dashboards (and other query results), often within about five minutes. This page covers basic setup for the remote write integration, as well as a few common troubleshooting topics. For information on integrating Prometheus servers in a high availability (HA) configuration, see our Prometheus high availability documentation. Set up the integration Go to the Prometheus remote write setup launcher in New Relic One, and complete these steps to add Prometheus data. Add Prometheus data Enter a name for the Prometheus server to be connected and your remote_write URL. Important: The name you enter for the server will create an attribute on your data. It will also be the name that identifies which Prometheus server is sending data to New Relic. Add a new remote_write URL to your Prometheus YML file. Add this information under global_config in the file, at the same indentation level as the global section. 
Use the following syntax: Prometheus v2.26 and newer remote_write: - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=YOUR_DATA_SOURCE_NAME authorization: credentials: YOUR_LICENSE_KEY Copy Prometheus older than v2.26 remote_write: - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=YOUR_DATA_SOURCE_NAME bearer_token:YOUR_LICENSE_KEY Copy OR Any Prometheus version remote_write: - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=YOUR_LICENSE_KEY&prometheus_server=YOUR_DATA_SOURCE_NAME Copy This approach passes credentials in the URL. We don't recommend using it unless one of these other approaches doesn't work in your environment. European Union accounts: If you're connecting from the EU, use the following URL: https://metric-api.eu.newrelic.com/prometheus/v1/write Copy Kubernetes and Helm remote write integrations: Add the remote write URL to your Helm values.yaml file. Replace remoteWrite: [] with two lines similar to the following example. Be sure to use your remote write URL and use indentation that matches the rest of the file: remoteWrite: - url: https://metric-api.newrelic.com/prometheus/v1/write?prometheus_server=YOUR_DATA_SOURCE_NAME bearer_token:YOUR_LICENSE_KEY Copy Restart your Prometheus server. View your data in the New Relic UI. For example, use the remote write dashboard we automatically create when you set up your integration. Map Prometheus and New Relic metric types The Prometheus remote write protocol doesn't include metric type information or other helpful metric metadata when sending metrics to New Relic, so we infer the metric type based on Prometheus naming conventions. Metrics not following these naming conventions may not be mapped correctly. We map Prometheus metrics types into New Relic metric types based on Prometheus metric naming conventions as follows: metricName_bucket is stored as a New Relic count metric type. metricName_count is stored as a New Relic count metric type. metricName_total is stored as a New Relic count metric type. metricName_sum is stored as a New Relic summary metric type. Everything else is stored as a New Relic gauge metric type. Override metric type mappings If you have metrics that don't follow Prometheus naming conventions, you can configure remote write to tag the metric with a newrelic_metric_type label that indicates the metric type. This label is stripped when received by New Relic. Example: You have a counter metric named my_counter, which does not have our naming convention suffix of _bucket, _count or _total. In this situation, your metric would be identified as a gauge rather than a counter. To correct this, add the following relabel configuration to your prometheus.yml: - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=... write_relabel_configs: - source_labels: [__name__] regex: ^my_counter$ target_label: newrelic_metric_type replacement: \"counter\" action: replace Copy This rule matches any metric with the name my_counter and adds a newrelic_metric_type label that identifies it as a counter. You can use the following (case sensitive!) values as the replacement value: counter gauge summary When a newrelic_metric_type label is present on a metric received and set to one of the valid values, New Relic will assign the indicated type to the metric (and strip the label) before downstream consumption in the data pipeline. 
If you have multiple metrics that don't follow the above naming conventions, you can add multiple rules with each rule matching different source labels. Set allow or deny lists for sent metrics If you need greater control over the data you send to New Relic, you can send a subset of your metrics. To do this, configure remote-write with the write_relabel_configs parameter with a subparameter action value of keep or deny. In this example, you'll only send the metrics that match the regular expression. Unmatched metrics won't be sent. Alternatively, you can use action: drop to drop all of the metrics that match the regular expression. - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=... write_relabel_configs: - source_labels: [__name__] regex: \"coredns_(.*)|etcd_(.*)\" action: keep Copy This Kubernetes example uses this Helm chart's values.yaml file. If you're using a different Helm chart, please check its remoteWrite documentation (for example, some Helm files use camelcase writeRelabelConfigs instead). remoteWrite: - url: https://metric-api.newrelic.com/prometheus/v1/write?X-License-Key=... write_relabel_configs: - source_labels: [__name__] regex: \"coredns_(.*)|etcd_(.*)\" action: keep Copy Customize remote write behavior You can customize the following parameters if you are writing to more than one account in New Relic or are connecting more than one Prometheus data source to the same account in New Relic. For more information, see the docs on remote write tuning. X-License Key Your account's license key is not an API key. The license key is used for authentication and to identify which account to write data into. If you are configuring Prometheus to write into different New Relic accounts, use a different key on each remote write URL. prometheus_server URL parameter The prometheus_server parameter is a label or attribute used to add to stats that are written to NRDB. Use this same label when configuring your Grafana data source to limit results to just those from a particular prometheus_server. Optimize throughput and memory consumption Remote write increases the total memory consumption of your Prometheus servers. If you're experiencing issues we recommend the following: Increase max_samples_per_send for higher throughput workloads, along a proportional increase in capacity. If memory consumption is still a problem, try limiting the number of max_shards per server. Troubleshoot error messages If you receive an integration error message from New Relic or error messages in your Prometheus server logs after restarting your Prometheus server, review our remote write troubleshooting documentation. This includes fixing common errors, such as missing or incorrect characters, bad requests, request entity too large, and rate limit errors. Remove the integration When you remove the Prometheus remote write integration, this stops new data from flowing, but it will not purge or remove any historical data. To remove the integration, remove the configuration code snippet from your Prometheus YML file, then restart the server.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 100.98851, + "_score": 95.261734, "_version": null, "_explanation": null, "sort": null, "highlight": { - "sections": "Prometheus OpenMetrics Integration", - "body": " and the Kubernetes Cluster Explorer. newrelic-logging: Provides a DaemonSet with New Relic's Fluent Bit output plugin to easily forward your logs to New Relic. 
nri-kube-events: Collects and reports cluster events (such as kubectl get events) to New Relic. nri-prometheus: New Relic's Prometheus OpenMetrics" + "title": "Set up your Prometheus remote write integration", + "sections": "Set up your Prometheus remote write integration", + "tags": "Prometheus integrations", + "body": " parameter The prometheus_server parameter is a label or attribute used to add to stats that are written to NRDB. Use this same label when configuring your Grafana data source to limit results to just those from a particular prometheus_server. Optimize throughput and memory consumption Remote write" }, - "id": "617d5841196a67bb40f7c1de" + "id": "617d515264441fc9eafbe18f" } ], "/spray-can/2907733c-6b3d-4044-97e2-2677889017b8": [ @@ -39468,7 +39401,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -39509,7 +39442,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -39550,7 +39483,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -39591,7 +39524,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -39632,7 +39565,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -39675,7 +39608,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -39716,7 +39649,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -39757,7 +39690,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -39798,7 +39731,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -39839,7 +39772,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -39882,7 +39815,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -39923,7 +39856,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -39964,7 +39897,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -40005,7 +39938,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40046,7 +39979,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40089,7 +40022,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -40130,7 +40063,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -40171,7 +40104,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -40212,7 +40145,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40253,7 +40186,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40296,7 +40229,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -40337,7 +40270,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -40378,7 +40311,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -40419,7 +40352,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40460,7 +40393,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40503,7 +40436,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -40544,7 +40477,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -40585,7 +40518,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -40626,7 +40559,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40667,7 +40600,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40710,7 +40643,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -40751,7 +40684,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -40792,7 +40725,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -40833,7 +40766,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40874,7 +40807,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -40939,7 +40872,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 160.40402, + "_score": 160.71904, "_version": null, "_explanation": null, "sort": null, @@ -40980,7 +40913,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.40832, "_version": null, "_explanation": null, "sort": null, @@ -41021,7 +40954,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.40805, "_version": null, "_explanation": null, "sort": null, @@ -41062,7 +40995,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.37999, "_version": null, "_explanation": null, "sort": null, @@ -41103,7 +41036,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.379814, "_version": null, "_explanation": null, "sort": null, @@ -41146,7 +41079,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -41187,7 +41120,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -41228,7 +41161,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -41269,7 +41202,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -41310,7 +41243,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -41353,7 +41286,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -41394,7 +41327,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -41435,7 +41368,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", 
"_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -41476,7 +41409,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -41517,7 +41450,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -41560,7 +41493,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -41601,7 +41534,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -41642,7 +41575,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -41683,7 +41616,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -41724,7 +41657,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -41773,7 +41706,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 240.86511, + "_score": 223.28017, "_version": null, "_explanation": null, "sort": null, @@ -41825,7 +41758,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 226.33626, + "_score": 210.89703, "_version": null, "_explanation": null, "sort": null, @@ -41859,7 +41792,7 @@ "external_id": "509f5fb8490b652df4c6f31ec06b403c3393530e", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elb-classic-monitoring-integration/", - "published_at": "2022-02-14T11:39:18Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-14T11:39:18Z", "document_type": "page", "popularity": 1, @@ -41867,7 +41800,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 212.75455, + "_score": 200.1141, "_version": null, "_explanation": null, "sort": null, @@ -41915,7 +41848,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 201.80853, + "_score": 189.4971, "_version": null, "_explanation": null, "sort": null, @@ -41960,7 +41893,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 200.57988, + "_score": 189.11807, "_version": null, "_explanation": null, "sort": null, @@ -42003,7 +41936,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -42044,7 +41977,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -42085,7 +42018,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -42126,7 +42059,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -42167,7 +42100,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -42216,7 +42149,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.50939, + "_score": 80.223404, "_version": null, "_explanation": null, "sort": null, @@ -42258,7 +42191,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -42299,7 +42232,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -42340,7 +42273,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -42381,7 +42314,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -42425,7 +42358,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59566, + "_score": 102.69989, "_version": null, "_explanation": null, "sort": null, @@ -42467,7 +42400,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75413, + "_score": 101.91952, "_version": null, "_explanation": null, "sort": null, @@ -42509,7 +42442,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7467, + "_score": 101.91374, "_version": null, "_explanation": null, "sort": null, @@ -42551,7 +42484,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93249, + "_score": 98.52911, "_version": null, "_explanation": null, "sort": null, @@ -42594,7 +42527,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.185425, + "_score": 95.344696, "_version": null, "_explanation": null, "sort": null, @@ -42630,7 +42563,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1259.9567, + "_score": 1262.2787, "_version": null, "_explanation": null, "sort": null, @@ -42649,7 +42582,7 @@ "Tip", "Important" ], - "published_at": "2022-02-15T01:41:58Z", + "published_at": "2022-02-16T01:44:29Z", "title": "Instrument your cluster", "updated_at": "2021-11-06T01:49:38Z", "type": "developer", @@ -42660,7 +42593,7 @@ "body": "lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. Each procedure in the lab builds upon the last, so make sure you've completed the last procedure, Explore your cluster, before starting this one. As the developer of TinyHat.me, you need to have visibility into your cluster. You need to know how healthy your application is. You need to know when things go wrong. 
But you've put it off for so long because instrumenting Kubernetes is hard and time-consuming. This is one of the things that makes Pixie so valuable. Pixie is a CNCF open source Kubernetes monitoring solution that provides: Automatic and instant baseline observability of your cluster Actionable, code-level insights of your applications With Pixie's auto-telemetry, you'll instrument your cluster in minutes to get dynamic data such as protocol traces, resource metrics, and app metrics from your cluster—all without an agent! Tip If you haven't signed into New Relic, do that now so you're ready to install Pixie into your cluster. Step 1 of 10 Open New Relic. On the right side of the upper navigation bar, click Add more data. Step 2 of 10 Click Guided install. This walks you through the installation process. Step 3 of 10 Click Kubernetes to let New Relic guide you through instrumenting your Kubernetes cluster. Step 4 of 10 Click Begin installation. Step 5 of 10 Select your account, name your cluster \"tiny-hat\", and click Continue. This specifies that TinyHat.me, and all its services, should live in a New Relic cluster called \"tiny-hat\" in the account you selected. Step 6 of 10 Leave the default choices on the next screen. These provide a range of observability features for your cluster, including our infrastructure agent, which gives you a high-level overview of the health of your cluster. Notably, the default options include \"Instant service-level insights, full-body requests, and application profiles through Pixie\", which you focus on in this lab. Click Continue. On the next screen, you see a command for installing our Kubernetes integration into your cluster. Step 7 of 10 Click Copy command. Now you're ready to install Pixie into your cluster. Step 8 of 10 Switch back to your terminal and paste the Helm command. Step 9 of 10 While you're installing Pixie, switch back to New Relic and click Continue to progress your guided installation to the final step. Here, you see a message that says \"Listening for data\". In a few minutes, Helm will have fully installed the necessary charts. You should see a message with the name, deployed date, namespace, and more: NAME: newrelic-bundle LAST DEPLOYED: Thu Sep 23 13:50:24 2021 NAMESPACE: newrelic STATUS: deployed REVISION: 1 TEST SUITE: None Soon after that, the New Relic page updates to tell you that we're receiving data from your cluster. Step 10 of 10 Click Kubernetes cluster explorer to see your nodes, pods, deployments, and a host of other data about your cluster, all in one view. Important You may see an error message, \"We're receiving incomplete data for this cluster.\" Please wait a few more minutes and refresh the page to see your cluster. In minutes, you were able to instrument your entire cluster without having to install language-specific agents or specify detailed cluster information! On top of all the data you see in the cluster explorer, click a pod or a node to dig deeper and see the granular data that the infrastructure agent was able to access near-instantly. lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie.
Now that you've instrumented your cluster, use Pixie to debug your application.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.33723, + "_score": 57.40007, "_version": null, "_explanation": null, "sort": null, @@ -42721,7 +42654,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 119.89099, + "_score": 113.07818, "_version": null, "_explanation": null, "sort": null, @@ -42761,7 +42694,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 115.529335, + "_score": 109.01546, "_version": null, "_explanation": null, "sort": null, @@ -42799,7 +42732,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.655106, + "_score": 57.56888, "_version": null, "_explanation": null, "sort": null, @@ -42839,7 +42772,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 56.1378, + "_score": 56.029602, "_version": null, "_explanation": null, "sort": null, @@ -42876,7 +42809,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 55.427326, + "_score": 55.355293, "_version": null, "_explanation": null, "sort": null, @@ -42924,7 +42857,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 205.69461, + "_score": 193.91898, "_version": null, "_explanation": null, "sort": null, @@ -42967,7 +42900,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 198.78757, + "_score": 186.70761, "_version": null, "_explanation": null, "sort": null, @@ -42981,129 +42914,126 @@ }, { "sections": [ - "Amazon DocumentDB monitoring integration", - "Important", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "DocumentDB Cluster data", - "DocumentDB ClusterByRole data", - "DocumentDB Instance data" + "AWS NeptuneDB", + "What's included", + "AWS NeptuneDB installation docs", + "What is AWS NeptuneDB?", + "Get started!", + "More info" ], - "title": "Amazon DocumentDB monitoring integration", - "type": "docs", + "title": "AWS NeptuneDB", + "type": "quickstarts", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "aws", + "amazon web services", + "database" ], - "external_id": "2254fd18215db2b24649f91f164707a6b0a253fb", + "quick_start_name": "AWS NeptuneDB", + "external_id": "b2abfe8f8dfa53e1f09b4d4f07c1fae48d25ec39", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-documentdb-monitoring-integration/", - "published_at": "2022-02-14T10:53:00Z", - "updated_at": "2022-02-14T10:52:59Z", + "url": "https://developer.newrelic.com/instant-observability/aws-neptunedb/3ed7e509-9e82-4357-b076-82e30eeb0d1d/", + "published_at": "2022-02-07T01:45:29Z", + "updated_at": "2021-10-06T14:00:38Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic offers an integration for reporting your Amazon DocumentDB data. This document explains how to activate this integration and describes the data that can be reported. 
Activate integration To enable this integration, follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon DocumentDB integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select an integration. You can query and explore your data using the following event types: Entity Event Type Provider Cluster AwsDocDbClusterSample AwsDocDbCluster ClusterByRole AwsDocDbClusterByRoleSample AwsDocDbClusterByRole Instance AwsDocDbInstanceSample AwsDocDbInstance For more on how to use your data, see Understand and use integration data. Metric data This integration collects Amazon DocumentDB data for Cluster, ClusterByRole, and Instance. DocumentDB Cluster data Metric Unit Description CPUUtilization Percent The percentage of CPU used by an instance. DatabaseConnections Count The number of connections to an instance. FreeLocalStorage Bytes This metric reports the amount of storage available to each instance for temporary tables and logs. This value depends on the instance class. You can increase the amount of free storage space for an instance by choosing a larger instance class for your instance. FreeableMemory Bytes The amount of available random access memory, in bytes. WriteIOPS CountPerSecond The average number of disk write I/O operations per second. Read and write IOPS are reported separately, on 1-minute intervals. ReadIOPS CountPerSecond The average number of disk read I/O operations per second. Amazon DocumentDB reports read and write IOPS separately, on one-minute intervals. WriteLatency Milliseconds The average amount of time, in milliseconds, taken per disk I/O operation. ReadLatency Milliseconds The average amount of time taken per disk I/O operation, in milliseconds. WriteThroughput Bytes The average number of bytes written to disk per second. ReadThroughput Bytes The average number of bytes read from disk per second. NetworkThroughput Bytes The amount of network throughput, in bytes per second, both received from and transmitted to clients by each instance in the Amazon DocumentDB cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkTransmitThroughput Bytes The amount of network throughput, in bytes per second, sent to clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkReceiveThroughput Bytes The amount of network throughput, in bytes per second, received from clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. EngineUptime Seconds The amount of time, in seconds, that the instance has been running. SwapUsage Bytes The amount of swap space used on the instance. DiskQueueDepth Count The number of outstanding read/write requests waiting to access the disk. BufferCacheHitRatio Percent The percentage of requests that are served by the buffer cache. BackupRetentionPeriodStorageUsed Bytes The total amount of backup storage in GiB used to support the point-in-time restore feature within Amazon DocumentDB's retention window. Included in the total reported by the TotalBackupStorageBilled metric.
Computed separately for each Amazon DocumentDB cluster. SnapshotStorageUsed Bytes The total amount of backup storage in GiB consumed by all snapshots for a given Amazon DocumentDB cluster outside its backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. TotalBackupStorageBilled Bytes The total amount of backup storage in GiB for which you are billed for a given Amazon DocumentDB cluster. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. Computed separately for each Amazon DocumentDB cluster. DBInstanceReplicaLag Milliseconds The amount of lag, in milliseconds, when replicating updates from the primary instance to a replica instance. DBClusterReplicaLagMaximum Milliseconds The maximum amount of lag, in milliseconds, between the primary instance and each Amazon DocumentDB instance in the cluster. DBClusterReplicaLagMinimum Milliseconds The minimum amount of lag, in milliseconds, between the primary instance and each replica instance in the cluster. VolumeBytesUsed Bytes The amount of storage, in bytes, used by your cluster. This value affects the cost of the cluster (for pricing information, see the Amazon DocumentDB product page). VolumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the read operations metric over a 5-minute period. You can determine the amount of billed read operations per second by taking the value of the billed read operations metric and dividing by 300 seconds. For example, if the billed read operations returns 13,686, then the billed read operations per second is 45 (13,686 / 300 = 45.62). You accrue billed read operations for queries that request database pages that are not present in the buffer cache and therefore must be loaded from storage. You might see spikes in billed read operations as query results are read from storage and then loaded into the buffer cache. VolumeWriteIOPs Count The average number of billed write I/O operations from a cluster volume, reported at 5-minute intervals. Billed write operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the write operations metric over a 5-minute period. You can determine the amount of billed write operations per second by taking the value of the billed write operations metric and dividing by 300 seconds. For example, if the billed write operations returns 13,686, then the billed write operations per second is 45 (13,686 / 300 = 45.62). DocumentDB ClusterByRole data Metric Unit Description CPUUtilization Percent The percentage of CPU used by an instance. DatabaseConnections Count The number of connections to an instance. FreeLocalStorage Bytes This metric reports the amount of storage available to each instance for temporary tables and logs. This value depends on the instance class. You can increase the amount of free storage space for an instance by choosing a larger instance class for your instance. FreeableMemory Bytes The amount of available random access memory, in bytes.
WriteIOPS CountPerSecond The average number of disk write I/O operations per second. Read and write IOPS are reported separately, on 1-minute intervals. ReadIOPS CountPerSecond The average number of disk read I/O operations per second. Amazon DocumentDB reports read and write IOPS separately, on one-minute intervals. WriteLatency Milliseconds The average amount of time, in milliseconds, taken per disk I/O operation. ReadLatency Milliseconds The average amount of time taken per disk I/O operation, in milliseconds. WriteThroughput Bytes The average number of bytes written to disk per second. ReadThroughput Bytes The average number of bytes read from disk per second. NetworkThroughput Bytes The amount of network throughput, in bytes per second, both received from and transmitted to clients by each instance in the Amazon DocumentDB cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkTransmitThroughput Bytes The amount of network throughput, in bytes per second, sent to clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkReceiveThroughput Bytes The amount of network throughput, in bytes per second, received from clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. EngineUptime Seconds The amount of time, in seconds, that the instance has been running. SwapUsage Bytes The amount of swap space used on the instance. DiskQueueDepth Count The number of outstanding read/write requests waiting to access the disk. BufferCacheHitRatio Percent The percentage of requests that are served by the buffer cache. BackupRetentionPeriodStorageUsed Bytes The total amount of backup storage in GiB used to support the point-in-time restore feature within Amazon DocumentDB's retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. SnapshotStorageUsed Bytes The total amount of backup storage in GiB consumed by all snapshots for a given Amazon DocumentDB cluster outside its backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. TotalBackupStorageBilled Bytes The total amount of backup storage in GiB for which you are billed for a given Amazon DocumentDB cluster. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. Computed separately for each Amazon DocumentDB cluster. DBInstanceReplicaLag Milliseconds The amount of lag, in milliseconds, when replicating updates from the primary instance to a replica instance. DBClusterReplicaLagMaximum Milliseconds The maximum amount of lag, in milliseconds, between the primary instance and each Amazon DocumentDB instance in the cluster. DBClusterReplicaLagMinimum Milliseconds The minimum amount of lag, in milliseconds, between the primary instance and each replica instance in the cluster. VolumeBytesUsed Bytes The amount of storage, in bytes, used by your cluster. This value affects the cost of the cluster (for pricing information, see the Amazon DocumentDB product page). VolumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals.
Billed read operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the read operations metric over a 5-minute period. You can determine the amount of billed read operations per second by taking the value of the billed read operations metric and dividing by 300 seconds. For example, if the billed read operations returns 13,686, then the billed read operations per second is 45 (13,686 / 300 = 45.62). You accrue billed read operations for queries that request database pages that are not present in the buffer cache and therefore must be loaded from storage. You might see spikes in billed read operations as query results are read from storage and then loaded into the buffer cache. VolumeWriteIOPs Count The average number of billed write I/O operations from a cluster volume, reported at 5-minute intervals. Billed write operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the write operations metric over a 5-minute period. You can determine the amount of billed write operations per second by taking the value of the billed write operations metric and dividing by 300 seconds. For example, if the billed write operations returns 13,686, then the billed write operations per second is 45 (13,686 / 300 = 45.62). DocumentDB Instance data Metric Unit Description CPUUtilization Percent The percentage of CPU used by an instance. DatabaseConnections Count The number of connections to an instance. FreeLocalStorage Bytes This metric reports the amount of storage available to each instance for temporary tables and logs. This value depends on the instance class. You can increase the amount of free storage space for an instance by choosing a larger instance class for your instance. FreeableMemory Bytes The amount of available random access memory, in bytes. WriteIOPS CountPerSecond The average number of disk write I/O operations per second. Read and write IOPS are reported separately, on 1-minute intervals. ReadIOPS CountPerSecond The average number of disk read I/O operations per second. Amazon DocumentDB reports read and write IOPS separately, on one-minute intervals. WriteLatency Milliseconds The average amount of time, in milliseconds, taken per disk I/O operation. ReadLatency Milliseconds The average amount of time taken per disk I/O operation, in milliseconds. WriteThroughput Bytes The average number of bytes written to disk per second. ReadThroughput Bytes The average number of bytes read from disk per second. NetworkThroughput Bytes The amount of network throughput, in bytes per second, both received from and transmitted to clients by each instance in the Amazon DocumentDB cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkTransmitThroughput Bytes The amount of network throughput, in bytes per second, sent to clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume. NetworkReceiveThroughput Bytes The amount of network throughput, in bytes per second, received from clients by each instance in the cluster. This throughput doesn't include network traffic between instances in the cluster and the cluster volume.
EngineUptime Seconds The amount of time, in seconds, that the instance has been running. SwapUsage Bytes The amount of swap space used on the instance. DiskQueueDepth Count The number of outstanding read/write requests waiting to access the disk. BufferCacheHitRatio Percent The percentage of requests that are served by the buffer cache. BackupRetentionPeriodStorageUsed Bytes The total amount of backup storage in GiB used to support the point-in-time restore feature within Amazon DocumentDB's retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. SnapshotStorageUsed Bytes The total amount of backup storage in GiB consumed by all snapshots for a given Amazon DocumentDB cluster outside its backup retention window. Included in the total reported by the TotalBackupStorageBilled metric. Computed separately for each Amazon DocumentDB cluster. TotalBackupStorageBilled Bytes The total amount of backup storage in GiB for which you are billed for a given Amazon DocumentDB cluster. Includes the backup storage measured by the BackupRetentionPeriodStorageUsed and SnapshotStorageUsed metrics. Computed separately for each Amazon DocumentDB cluster. DBInstanceReplicaLag Milliseconds The amount of lag, in milliseconds, when replicating updates from the primary instance to a replica instance. DBClusterReplicaLagMaximum Milliseconds The maximum amount of lag, in milliseconds, between the primary instance and each Amazon DocumentDB instance in the cluster. DBClusterReplicaLagMinimum Milliseconds The minimum amount of lag, in milliseconds, between the primary instance and each replica instance in the cluster. VolumeBytesUsed Bytes The amount of storage, in bytes, used by your cluster. This value affects the cost of the cluster (for pricing information, see the Amazon DocumentDB product page). VolumeReadIOPs Count The average number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. Billed read operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the read operations metric over a 5-minute period. You can determine the amount of billed read operations per second by taking the value of the billed read operations metric and dividing by 300 seconds. For example, if the billed read operations returns 13,686, then the billed read operations per second is 45 (13,686 / 300 = 45.62). You accrue billed read operations for queries that request database pages that are not present in the buffer cache and therefore must be loaded from storage. You might see spikes in billed read operations as query results are read from storage and then loaded into the buffer cache. VolumeWriteIOPs Count The average number of billed write I/O operations from a cluster volume, reported at 5-minute intervals. Billed write operations are calculated at the cluster volume level, aggregated from all instances in the cluster, and then reported at 5-minute intervals. The value is calculated by taking the value of the write operations metric over a 5-minute period. You can determine the amount of billed write operations per second by taking the value of the billed write operations metric and dividing by 300 seconds.
For example, if the billed write operations returns 13,686, then the billed write operations per second is 45 (13,686 / 300 = 45.62).", + "body": "What's included AWS NeptuneDB installation docs Monitor AWS NeptuneDB by connecting AWS to New Relic. Doc What is AWS NeptuneDB? Develop applications on top of interconnected datasets with this AWS graph database service. Get started! Start monitoring AWS NeptuneDB by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS NeptuneDB documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS NeptuneDB. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS NeptuneDB observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS NeptuneDB installation docs Monitor AWS NeptuneDB by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 186.26059, + "_score": 177.22, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Amazon DocumentDB monitoring integration", - "sections": "Amazon DocumentDB monitoring integration", - "tags": "AWS integrations list", - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic offers an integration for reporting your Amazon DocumentDB data" + "title": "AWS NeptuneDB", + "sections": "AWS NeptuneDB", + "tags": "amazon web services", + "quick_start_name": "AWS NeptuneDB", + "body": "What's included AWS NeptuneDB installation docs Monitor AWS NeptuneDB by connecting AWS to New Relic. Doc What is AWS NeptuneDB? Develop applications on top of interconnected datasets with this AWS graph database service. Get started! 
Start monitoring AWS NeptuneDB by connecting Amazon Web Services" }, - "id": "617da7ad64441fbd20fbc7da" + "id": "61566b7ae7b9d2cb4f8de366" }, { "sections": [ - "Azure Database for MySQL monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Database sample metrics", - "Inventory data", - "EOL NOTICE", - "azure/mysql/server/" + "AWS Redshift", + "What's included", + "AWS Redshift installation docs", + "What is AWS Redshift?", + "Get started!", + "More info" ], - "title": "Azure Database for MySQL monitoring integration", - "type": "docs", + "title": "AWS Redshift", + "type": "quickstarts", "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" + "aws", + "amazon web services", + "database" ], - "external_id": "4a13cd9222a15aaf7b28c1c84afd0483394cf7c7", + "quick_start_name": "AWS Redshift", + "external_id": "7999452d7826e8472874958be84ba17a906df0bd", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-database-mysql-monitoring-integration/", - "published_at": "2022-02-15T19:30:32Z", - "updated_at": "2022-02-15T19:30:32Z", + "url": "https://developer.newrelic.com/instant-observability/aws-redshift/cfceb579-a15e-4a13-84ab-72bd0ffd69e8/", + "published_at": "2022-02-09T01:46:48Z", + "updated_at": "2021-10-06T14:00:36Z", "document_type": "page", "popularity": 1, - "body": "New Relic's integrations include an integration for reporting your Microsoft Azure Database for MySQL metrics and other data to New Relic. This document explains how to activate the integration and describes the data reported. Features New Relic gathers database data from the Azure Database for MySQL service, which provides fully managed, enterprise-ready MySQL Community database as a service. The service provides high availability, elastic scaling, automatic backups, and data protection at-rest and in-motion. Using New Relic, you can: View Azure Database for MySQL data in pre-built dashboards. Run custom queries and visualize the data in New Relic One. Create alert conditions to notify you of changes in data. Activate integration Follow standard procedures to activate your Azure service in New Relic Infrastructure. Configuration and polling You can change the polling frequency and filter data using configuration options. New Relic queries your Azure Database services according to a default polling interval, which varies depending on the integration. For Azure Database for MySQL integrations: Polling interval: 5 minutes (maximum recommended polling frequency: 1 hour) Resolution: 1 data point per minute Find and use data To explore your integration data, go to one.newrelic.com > Infrastructure > Azure > (select an integration). Data about a single database is attached to the AzureMySqlServerSample event type, with a provider value of AzureMySqlServer. Metric data This integration collects the following metric data. Database sample metrics Metric Description activeConnections Count of active connections. backupStorageUsedBytes Backup storage used, in bytes. connectionsFailed Count of failed connections. cpuPercent Percentage of CPU used. memoryPercent Percentage of memory used. networkEgressBytes Network Out across active connections, in bytes. networkIngressBytes Network In across active connections, in bytes. secondsBehindMaster Replication lag, in seconds. serverlogStorageLimitBytes Server log storage limit, in bytes. 
serverlogStoragePercent Percentage of server log storage used. serverlogStorageUsageBytes Server log storage used, in bytes. storageLimitBytes Amount of storage available, in bytes. storagePercent Percentage of available storage used. storageUsedBytes Amount of storage used, in bytes. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. This integration collects the following inventory data about your system's state and configuration. azure/mysql/server/ administratorLogin configuration databaseNames databases domainName earliestRestoreDate firewalls geoRedundantBackup isDataWarehouse isReplica MasterServerid maxConnections name regionName replicaCapacity resourceGroupName skuCapacity skuFamily skuName skuTier sslEnforcement storageAutoGrow tags type userVisibleState version", + "body": "What's included AWS Redshift installation docs Monitor AWS Redshift by connecting AWS to New Relic. Doc What is AWS Redshift? Fully managed data warehouse used to analyze all your data using standard SQL and your existing Amazon Business Intelligence tools. Get started! Start monitoring AWS Redshift by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Redshift documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS Redshift. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS Redshift observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS Redshift installation docs Monitor AWS Redshift by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 182.84044, + "_score": 177.22, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Azure Database for MySQL monitoring integration", - "sections": "Azure Database for MySQL monitoring integration", - "body": " and filter data using configuration options. New Relic queries your Azure Database services according to a default polling interval, which varies depending on the integration. For Azure Database for MySQL integrations: Polling interval: 5 minutes (maximum recommended polling frequency: 1 hour) Resolution: 1" + "title": "AWS Redshift", + "sections": "AWS Redshift", + "tags": "amazon web services", + "quick_start_name": "AWS Redshift", + "body": " Redshift by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS Redshift documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. 
More info Check out the documentation" }, - "id": "617da965196a67a0f5f7ce12" + "id": "61566b7b28ccbc01f9f2141e" }, { "sections": [ - "Azure Database for PostgreSQL monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "View and use data", - "Metric data", - "Database sample metrics", - "Inventory data", - "EOL NOTICE", - "azure/postgresql/server" + "AWS ElastiCache", + "What's included", + "AWS ElastiCache installation docs", + "What is AWS ElastiCache?", + "Get started!", + "More info" ], - "title": "Azure Database for PostgreSQL monitoring integration", - "type": "docs", + "title": "AWS ElastiCache", + "type": "quickstarts", "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" + "aws", + "amazon web services", + "database" ], - "external_id": "8771456c828d7db008d61950004f786af1f23e1b", + "quick_start_name": "AWS ElastiCache", + "external_id": "83e73cb32d583d7f01d0a5c41245c024de875e68", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-database-postgresql-monitoring-integration/", - "published_at": "2022-02-15T19:30:58Z", - "updated_at": "2022-02-15T19:30:58Z", + "url": "https://developer.newrelic.com/instant-observability/aws-elasticache/5c1e7b31-df21-4acb-b72c-0a8786b88301/", + "published_at": "2022-02-07T01:45:28Z", + "updated_at": "2021-10-06T13:59:38Z", "document_type": "page", "popularity": 1, - "body": "New Relic's integrations include an integration for reporting your Microsoft Azure Database for PostgreSQL metrics and other data to New Relic. This document explains how to activate the integration and describes the data reported. Features New Relic gathers database data from the Azure Database for PostgreSQL service, which provides fully managed, enterprise-ready PostgreSQL Community database as a service. The service provides high availability, elastic scaling, automatic backups, and data protection at-rest and in-motion. Using New Relic, you can: View Azure Database for PostgreSQL data in pre-built Infrastructure dashboards. Run custom queries and visualize the data. Create alert conditions to notify you of changes in data. Activate integration Follow standard procedures to activate your Azure service in New Relic. Configuration and polling New Relic queries your Azure Database services according to a default polling interval, which varies depending on the integration. For Azure Database for PostgreSQL integrations: Polling interval: 5 minutes (maximum recommended polling frequency: 1 hour) Resolution: 1 data point per minute You can change the polling frequency and filter data using configuration options. View and use data To explore your integration data: Go to one.newrelic.com > Infrastructure > Azure > (select an integration). You can query and explore your data using the AzurePostgreSqlServerSample event type, with a provider value of AzurePostgreSqlServer. Metric data This integration collects the following metric data. Database sample metrics Metric Description activeConnections Count of active connections. backupStorageUsedBytes Backup storage used, in bytes. connectionsFailed Count of failed connections. cpuPercent Percentage of CPU used. ioConsumptionPercent Percentage of I/O consumption used. memoryPercent Percentage of memory used. networkEgressBytes Network Out across active connections, in bytes. networkIngressBytes Network In across active connections, in bytes. 
pgReplicaLogDelayBytes Replication lag, in bytes. pgReplicaLogDelaySeconds Replication lag, in seconds. serverlogStorageLimitBytes Server log storage limit, in bytes. serverlogStoragePercent Percentage of server log storage used. serverlogStorageUsageBytes Server log storage used, in bytes. storageLimitBytes Amount of storage available, in bytes. storagePercent Percentage of available storage used. storageUsedBytes Amount of storage used, in bytes. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. This integration collects the following inventory data about your system's state and configuration. For more information, see the Microsoft Azure PostgreSQL documentation. azure/postgresql/server administratorLogin configuration databaseNames databases, including id, name, type, charset, and collation domainName earliestRestoreDate firewalls, including id, name, type, startIpAddress, and endIpAddress geoRedundantBackup isReplica MasterServerid (only for a replica server) maxConnections name regionName replicaCapacity (only for a replica server) replicationRole (only for a replica server) resourceGroupName skuCapacity skuFamily skuName skuTier sslEnforcement storageAutoGrow tags type userVisibleState version", + "body": "What's included AWS ElastiCache installation docs Monitor AWS ElastiCache by connecting AWS to New Relic. Doc What is AWS ElastiCache? Deploy, operate, and scale an in-memory data store or cache in the cloud. Get started! Start monitoring AWS ElastiCache by connecting Amazon Web Services (AWS) to New Relic! Check out our AWS ElastiCache documentation to instrument your cloud service and manage the stability, scalability, and reliability of your systems with New Relic's infrastructure monitoring capabilities. More info Check out the documentation to learn more about New Relic monitoring for AWS ElastiCache. This quickstart doesn't include any dashboards . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo AWS ElastiCache observability quickstart contains 1 data source . This is how you'll get your data into New Relic. AWS ElastiCache installation docs Monitor AWS ElastiCache by connecting AWS to New Relic. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 182.00241, + "_score": 177.21994, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Azure Database for PostgreSQL monitoring integration", - "sections": "Azure Database for PostgreSQL monitoring integration", - "body": " services according to a default polling interval, which varies depending on the integration. For Azure Database for PostgreSQL integrations: Polling interval: 5 minutes (maximum recommended polling frequency: 1 hour) Resolution: 1 data point per minute You can change the polling frequency and filter" + "title": "AWS ElastiCache", + "sections": "AWS ElastiCache", + "tags": "amazon web services", + "quick_start_name": "AWS ElastiCache", + "body": "What's included AWS ElastiCache installation docs Monitor AWS ElastiCache by connecting AWS to New Relic. 
Doc What is AWS ElastiCache? Deploy, operate, and scale an in-memory data store or cache in the cloud. Get started! Start monitoring AWS ElastiCache by connecting Amazon Web Services (AWS" }, - "id": "617d542ee7b9d203c5c05971" + "id": "61566912196a679b5eb70d4d" } ], "/codestream/29bd9a4a-1c19-4219-9694-0942f6411ce7": [ @@ -43146,7 +43076,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 69.896, + "_score": 65.391045, "_version": null, "_explanation": null, "sort": null, @@ -43226,7 +43156,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 51.22518, + "_score": 50.366966, "_version": null, "_explanation": null, "sort": null, @@ -43261,7 +43191,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 38.652534, + "_score": 38.507614, "_version": null, "_explanation": null, "sort": null, @@ -43301,7 +43231,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 35.34933, + "_score": 35.45433, "_version": null, "_explanation": null, "sort": null, @@ -43348,7 +43278,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 34.9342, + "_score": 35.04619, "_version": null, "_explanation": null, "sort": null, @@ -43382,7 +43312,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 449.1781, + "_score": 422.13013, "_version": null, "_explanation": null, "sort": null, @@ -43430,7 +43360,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 436.77216, + "_score": 411.23004, "_version": null, "_explanation": null, "sort": null, @@ -43466,7 +43396,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -43512,7 +43442,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -43551,7 +43481,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -43595,7 +43525,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -43636,7 +43566,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -43677,7 +43607,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -43718,7 +43648,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -43759,7 +43689,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -43802,7 +43732,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -43843,7 +43773,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -43884,7 +43814,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -43925,7 +43855,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -43966,7 +43896,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -44009,7 +43939,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -44050,7 +43980,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -44091,7 +44021,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -44132,7 +44062,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -44173,7 +44103,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -44216,7 +44146,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -44257,7 +44187,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -44298,7 +44228,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -44339,7 +44269,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -44380,7 +44310,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -44423,7 +44353,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 96.08901, + "_score": 96.32915, "_version": null, "_explanation": null, "sort": null, @@ -44464,7 +44394,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 96.08896, + "_score": 96.32909, "_version": null, "_explanation": null, "sort": null, @@ -44505,7 +44435,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 96.08889, + "_score": 96.329025, "_version": null, "_explanation": null, "sort": null, @@ -44586,7 +44516,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.05713, + "_score": 80.56319, "_version": null, "_explanation": null, "sort": null, @@ -44600,38 +44530,44 @@ }, { "sections": [ - "NerdGraph tutorial: APM agent configuration examples", - "Configure server-side configuration", - "Retrieve settings" + "CXF", + "What's included", + "Java", + "High CPU Utilization", + "Transaction Errors", + "CXF installation docs", + "What is CXF?", + "Get started!", + "More info" ], - "title": "NerdGraph tutorial: APM agent configuration examples", - "type": "docs", + "title": "CXF", + "type": "quickstarts", "tags": [ - "Examples", - "NerdGraph", - "APIs" + "apm", + "java" ], - "external_id": "219a704e974c1c3cb1223fc90e50dcdf09709587", - "image": "", - "url": "https://docs.newrelic.com/docs/apis/nerdgraph/examples/apm-config-nerdgraph/", - "published_at": "2022-02-14T06:02:53Z", - "updated_at": "2022-02-14T06:02:53Z", + "quick_start_name": "CXF", + "external_id": "87744f16eda48b2be62854b3b596e9a4488d9eff", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/java/cxf/dashboards/java.png", + "url": "https://developer.newrelic.com/instant-observability/cxf/7c880505-ed7d-4f2d-98b6-13fc2a3fa114/", + "published_at": "2022-02-04T02:08:47Z", + "updated_at": "2021-10-30T13:50:39Z", "document_type": "page", "popularity": 1, - "body": "This doc is a place for examples of configuring APM agents using our NerdGraph API. Configure server-side configuration Note that for APM agents to use configuration values changed via NerdGraph, server side configuration must be enabled. For requirements, see server-side config requirements. Here's an example query returning the status of the server side configuration setting for a given entity. query ExampleReadQuery { actor { entity(guid:\"ZjY1ODgxfEFQTXxBUFBYSUNBVElPTnz0ODEwMTY3NzZ\") { ...on ApmApplicationEntity { apmSettings { apmConfig { useServerSideConfig } } } } } } Copy Here's an example of disabling server-side configuration. Note that settings uses an array, which may be helpful if you want to update multiple entities. mutation ExampleUpdateQuery(guid: \"ZjY1ODgxfEFQTXxBUFBYSUNBVElPTnz0ODEwMTY3NzZ\", settings: { apmConfig: { useServerSideConfig: false }} ) { apmSettings { apmConfig { useServerSideConfig } } errors { description } } } Copy For how to find an entity's GUID, see Find entity data. Retrieve settings Here's an example of returning an entity's transaction tracer settings: query ExampleReadQuery { actor { entity(guid:\"ZjY1ODgxfEFQTXxBUFBYSUNBVElPTnz0ODEwMTY3NzZ\") { ... on ApmApplicationEntity { guid name apmSettings { transactionTracer { enabled explainEnabled explainThresholdType explainThresholdValue } } } } } } Copy", + "body": "What's included Java Dashboard High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert CXF installation docs Open source service framework for building and developing on front-end APIs. Doc What is CXF? Open source service framework for building and developing on front-end APIs. Get started! Leverage community expertise and instantly get value out of your telemetry data. 
This quickstart automatically instruments CXF with the New Relic Java agent, and allows you to instantly monitor your Java application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for CXF. CXF quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Java Java CXF observability quickstart contains 2 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. CXF observability quickstart contains 1 data source . This is how you'll get your data into New Relic. CXF installation docs Open source service framework for building and developing on front-end APIs. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 79.167244, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "NerdGraph tutorial: APM agent configuration examples", - "sections": "NerdGraph tutorial: APM agent configuration examples", - "body": "This doc is a place for examples of configuring APM agents using our NerdGraph API. Configure server-side configuration Note that for APM agents to use configuration values changed via NerdGraph, server side configuration must be enabled. For requirements, see server-side config requirements. Here" + "sections": "Java", + "tags": "apm", + "body": " agent, and allows you to instantly monitor your Java application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps .
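The CXF quickstart body quoted above defines a Transaction Errors alert that fires when more than 10% of transactions fail within 5 minutes. A minimal NRQL sketch of that kind of error-rate signal, assuming the standard APM Transaction event and a hypothetical appName placeholder (the quickstart's actual alert condition definition lives in its repository):

SELECT percentage(count(*), WHERE error IS true) FROM Transaction WHERE appName = 'YOUR_APP_NAME' SINCE 5 minutes ago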
More info Check out the documentation to learn more about New Relic" }, - "id": "61c5c9b928ccbcbd9607c57d" + "id": "615667bf64441f38dd099655" } ], "/weblogic/dc6e140e-79fd-43ab-b113-26376e534e63": [ @@ -44665,7 +44601,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -44706,7 +44642,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -44747,7 +44683,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -44788,7 +44724,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -44829,7 +44765,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -44873,7 +44809,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -44915,7 +44851,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13811, + "_score": 116.47097, "_version": null, "_explanation": null, "sort": null, @@ -44957,7 +44893,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.13807, + "_score": 116.470924, "_version": null, "_explanation": null, "sort": null, @@ -44999,7 +44935,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.052864, + "_score": 116.38686, "_version": null, "_explanation": null, "sort": null, @@ -45035,7 +44971,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.4525, + "_score": 114.805626, "_version": null, "_explanation": null, "sort": null, @@ -45077,7 +45013,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -45118,7 +45054,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -45159,7 +45095,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -45200,7 +45136,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -45241,7 +45177,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -45280,7 +45216,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 266.97388, + "_score": 252.82178, "_version": null, "_explanation": null, "sort": 
null, @@ -45335,7 +45271,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 266.02216, + "_score": 251.52829, "_version": null, "_explanation": null, "sort": null, @@ -45373,7 +45309,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 195.18622, + "_score": 184.83228, "_version": null, "_explanation": null, "sort": null, @@ -45418,7 +45354,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 166.38992, + "_score": 164.57996, "_version": null, "_explanation": null, "sort": null, @@ -45442,7 +45378,7 @@ "3. Triage your errors", "4. Manage your triaged errors" ], - "published_at": "2022-02-15T01:40:34Z", + "published_at": "2022-02-16T01:43:31Z", "title": "Resolve Errors Faster with Full Stack Error Tracking", "updated_at": "2022-02-12T01:48:33Z", "type": "developer", @@ -45453,7 +45389,7 @@ "body": "You're one of the developers of an eCommerce website called Geek's Movie Shop, and recently, you introduced some new features. Before you push your changes to production where all your users will have access to them, you want to discover as many errors as you can in your development environment. Then you can decide which ones to fix and which ones to ignore. Errors Inbox is the perfect tool to help you do this. Learning Objectives In this lab, you: Spin up Geek's Movie Shop in your development environment Set up a workload for Errors Inbox Resolve and ignore errors in your inbox Assign unresolved errors Filter errors in your inbox by status Integrate Errors Inbox with Jira, CodeStream, or Slack Requirements Create a free New Relic account Install Docker Procedures 1. Spin up your application Set up your environment to deploy Geek's Movie Shop. 5 min 2. Set up Errors Inbox Set up Errors Inbox in New Relic 5 min 3. Triage your errors Track and triage errors across your stack with Errors Inbox 5 min 4.
Manage your triaged errors Manage your triaged errors in Errors Inbox 5 min", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 149.23486, + "_score": 142.37854, "_version": null, "_explanation": null, "sort": null, @@ -45497,7 +45433,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.40832, "_version": null, "_explanation": null, "sort": null, @@ -45538,7 +45474,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.40805, "_version": null, "_explanation": null, "sort": null, @@ -45579,7 +45515,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.37999, "_version": null, "_explanation": null, "sort": null, @@ -45620,7 +45556,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.379814, "_version": null, "_explanation": null, "sort": null, @@ -45661,7 +45597,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.379814, "_version": null, "_explanation": null, "sort": null, @@ -45704,7 +45640,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -45745,7 +45681,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -45786,7 +45722,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -45827,7 +45763,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -45868,7 +45804,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -45911,7 +45847,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -45952,7 +45888,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -45993,7 +45929,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -46034,7 +45970,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -46075,7 +46011,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -46158,7 +46094,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.08684, + "_score": 99.36145,
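The Errors Inbox lab quoted above triages errors through the UI. For a rough query-side view of the same error traffic, a sketch assuming the standard APM TransactionError event (Errors Inbox's own fingerprint-based grouping is not replicated by this simple facet):

SELECT count(*) FROM TransactionError FACET error.message SINCE 1 day ago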
"_version": null, "_explanation": null, "sort": null, @@ -46196,7 +46132,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.00305, + "_score": 82.84302, "_version": null, "_explanation": null, "sort": null, @@ -46235,7 +46171,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.40832, "_version": null, "_explanation": null, "sort": null, @@ -46276,7 +46212,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.40805, "_version": null, "_explanation": null, "sort": null, @@ -46317,7 +46253,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.37999, "_version": null, "_explanation": null, "sort": null, @@ -46353,7 +46289,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65027, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -46389,7 +46325,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -46435,7 +46371,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -46474,7 +46410,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -46520,7 +46456,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.1103, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -46557,7 +46493,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65027, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -46593,7 +46529,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + "_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -46639,7 +46575,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -46678,7 +46614,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -46724,7 +46660,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.1103, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -46768,7 +46704,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -46809,7 +46745,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -46850,7 +46786,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 
77.38236, "_version": null, "_explanation": null, "sort": null, @@ -46891,7 +46827,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -46932,7 +46868,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -46975,7 +46911,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -47016,7 +46952,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -47057,7 +46993,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -47098,7 +47034,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47139,7 +47075,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47182,7 +47118,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -47223,7 +47159,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -47264,7 +47200,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -47305,7 +47241,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47346,7 +47282,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47389,7 +47325,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -47430,7 +47366,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -47471,7 +47407,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -47512,7 +47448,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47553,7 +47489,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 
77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47596,7 +47532,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -47637,7 +47573,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -47678,7 +47614,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -47719,7 +47655,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47760,7 +47696,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -47804,7 +47740,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.12862, + "_score": 92.36895, "_version": null, "_explanation": null, "sort": null, @@ -47845,7 +47781,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -47886,7 +47822,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -47927,7 +47863,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -47968,7 +47904,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.06133, + "_score": 92.30259, "_version": null, "_explanation": null, "sort": null, @@ -48015,7 +47951,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30777, + "_score": 168.52728, "_version": null, "_explanation": null, "sort": null, @@ -48066,7 +48002,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.3023, + "_score": 168.52283, "_version": null, "_explanation": null, "sort": null, @@ -48109,7 +48045,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 146.85614, + "_score": 144.60301, "_version": null, "_explanation": null, "sort": null, @@ -48167,7 +48103,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.08545, + "_score": 142.95915, "_version": null, "_explanation": null, "sort": null, @@ -48221,7 +48157,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 144.11217, + "_score": 136.8765, "_version": null, "_explanation": null, "sort": null, @@ -48258,7 +48194,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 301.65027, + "_score": 283.12827, "_version": null, "_explanation": null, "sort": null, @@ -48294,7 +48230,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.86853, + 
"_score": 274.42047, "_version": null, "_explanation": null, "sort": null, @@ -48340,7 +48276,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.31348, + "_score": 266.0613, "_version": null, "_explanation": null, "sort": null, @@ -48379,7 +48315,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.9523, + "_score": 264.79092, "_version": null, "_explanation": null, "sort": null, @@ -48425,7 +48361,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.1103, + "_score": 263.1209, "_version": null, "_explanation": null, "sort": null, @@ -48470,7 +48406,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59566, + "_score": 102.699814, "_version": null, "_explanation": null, "sort": null, @@ -48512,7 +48448,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75413, + "_score": 101.91943, "_version": null, "_explanation": null, "sort": null, @@ -48554,7 +48490,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7467, + "_score": 101.913666, "_version": null, "_explanation": null, "sort": null, @@ -48596,7 +48532,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93249, + "_score": 98.52904, "_version": null, "_explanation": null, "sort": null, @@ -48639,7 +48575,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.185425, + "_score": 95.344635, "_version": null, "_explanation": null, "sort": null, @@ -48682,7 +48618,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.58116, + "_score": 101.49906, "_version": null, "_explanation": null, "sort": null, @@ -48724,7 +48660,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.58048, + "_score": 101.49839, "_version": null, "_explanation": null, "sort": null, @@ -48767,7 +48703,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.7446, + "_score": 90.728516, "_version": null, "_explanation": null, "sort": null, @@ -48813,7 +48749,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.74297, + "_score": 90.72719, "_version": null, "_explanation": null, "sort": null, @@ -48842,7 +48778,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.19047, + "_score": 76.975914, "_version": null, "_explanation": null, "sort": null, @@ -48854,70 +48790,6 @@ ], "/cribl-logstream/e67f2859-80c1-4234-bbcf-bcbeeb31d70d": [], "/lambda-go/0ec4a7a7-9f9a-4c1d-af3d-07803619711b": [ - { - "sections": [ - "Troubleshoot enabling serverless monitoring of AWS Lambda", - "Problem", - "Solution", - "Recommended: Attach your CloudWatch logs to the ticket", - "Important" - ], - "title": "Troubleshoot enabling serverless monitoring of AWS Lambda", - "type": "docs", - "tags": [ - "Troubleshooting", - "AWS Lambda monitoring", - "Serverless function monitoring" - ], - "external_id": "73f864add78be5efb2429485506dc5a679a9820e", - "image": "", - "url": 
"https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/troubleshooting/troubleshoot-enabling-serverless-monitoring-aws-lambda/", - "published_at": "2022-02-15T17:56:10Z", - "updated_at": "2022-02-15T17:56:09Z", - "document_type": "troubleshooting_doc", - "popularity": 1, - "body": "Problem You're attempting to enable serverless monitoring for AWS Lambda and are having an issue or error. Solution There are two common problems related to not completing all of the enablement procedures: Not seeing data on CloudWatch metrics UI page. This means the AWS integration step wasn't completed. Not seeing data on Troubleshooting category UI pages. If you aren't seeing data on the Distributed tracing, Errors, and Invocations UI tabs, this means the APM agent instrumentation step wasn't completed. Besides these basic enablement problems, there are some additional problems that may cause an issue: CloudWatch error \"HTTP error 401: unauthorized.\" This is due to an incorrect API Key. The --nr-api-keyargument in the Configure AWS enable step takes your user key, which is different from the REST API key. Custom metrics are missing. Lambda monitoring is not compatible with our custom metrics. Use custom attributes to add metadata. Invocations missing. To see invocation breakdown details, distributed tracing must be enabled as part of the Lambda instrumentation step. Distributed tracing is required so that span details can be displayed in the invocation details pane. You've completed the installation, integration, and instrumentation steps correctly, and your function is sending logs to CloudWatch but you're not seeing traces for specific dependencies (or any traces) in the UI. This may result from the order of layer merging (if you're using our Lambda layers) or from the order of import (if you're instrumenting manually): If you're instrumenting with layers: make sure in your function configuration that the New Relic layer is merged before other layers (though if your function uses webpack, the New Relic layer should be merged after the webpack layer). If you're instrumenting a Node function manually, make sure that logging is enabled, and that your function imports newrelic before it imports any dependencies you expect to monitor. If none of these solutions help you, contact our support team. The following information will help you when you talk to support technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? Recommended: Attach your CloudWatch logs to the ticket To provide our support team with logging information when opening a ticket: Invoke the function in AWS Lambda. Click on the logs link after your function runs. This will take you to the CloudWatch logs in AWS. On the left-hand sidebar in AWS, under Logs, click on Insights. Select your function and also the newrelic-log-ingestion stream. Apply an appropriate Time Filter, and a log entry limit (the default of 20 may not be enough). Under Actions select Copy query results (ASCII). Paste the copied text into a new text file, then save and upload the text file to the ticket. Important The NR_LAMBDA_MONITORING payload contains all the information the agent attempts to send up, including metrics, events, some AWS account metadata, invocations and errors data. 
Note that some of that data (for example, our legacy metrics) will not make it to our UI because our ingest pipeline does not consume them.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 119.62621, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Troubleshoot enabling serverless monitoring of AWS Lambda", - "sections": "Troubleshoot enabling serverless monitoring of AWS Lambda", - "tags": "Serverless function monitoring", - "body": "Problem You're attempting to enable serverless monitoring for AWS Lambda and are having an issue or error. Solution There are two common problems related to not completing all of the enablement procedures: Not seeing data on CloudWatch metrics UI page. This means the AWS integration step wasn't" - }, - "id": "603ea6bb64441f85284e889b" - }, - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.38019, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -48949,7 +48821,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -48990,7 +48862,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -49000,6 +48872,70 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "sections": [ + "Troubleshoot enabling serverless monitoring of AWS Lambda", + "Problem", + "Solution", + "Recommended: Attach your CloudWatch logs to the ticket", + "Important" + ], + "title": "Troubleshoot enabling serverless monitoring of AWS Lambda", + "type": "docs", + "tags": [ + "Troubleshooting", + "AWS Lambda monitoring", + "Serverless function monitoring" + ], + "external_id": "73f864add78be5efb2429485506dc5a679a9820e", + "image": "", + "url": "https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/troubleshooting/troubleshoot-enabling-serverless-monitoring-aws-lambda/", + "published_at": "2022-02-15T17:56:10Z", + "updated_at": "2022-02-15T17:56:09Z", + "document_type": "troubleshooting_doc", + "popularity": 1, + "body": "Problem You're attempting to enable serverless monitoring for AWS Lambda and are having an issue or error. Solution There are two common problems related to not completing all of the enablement procedures: Not seeing data on CloudWatch metrics UI page. This means the AWS integration step wasn't completed. Not seeing data on Troubleshooting category UI pages. If you aren't seeing data on the Distributed tracing, Errors, and Invocations UI tabs, this means the APM agent instrumentation step wasn't completed. Besides these basic enablement problems, there are some additional problems that may cause an issue: CloudWatch error \"HTTP error 401: unauthorized.\" This is due to an incorrect API Key. The --nr-api-keyargument in the Configure AWS enable step takes your user key, which is different from the REST API key. Custom metrics are missing. Lambda monitoring is not compatible with our custom metrics. Use custom attributes to add metadata. Invocations missing. To see invocation breakdown details, distributed tracing must be enabled as part of the Lambda instrumentation step. Distributed tracing is required so that span details can be displayed in the invocation details pane. You've completed the installation, integration, and instrumentation steps correctly, and your function is sending logs to CloudWatch but you're not seeing traces for specific dependencies (or any traces) in the UI. This may result from the order of layer merging (if you're using our Lambda layers) or from the order of import (if you're instrumenting manually): If you're instrumenting with layers: make sure in your function configuration that the New Relic layer is merged before other layers (though if your function uses webpack, the New Relic layer should be merged after the webpack layer). If you're instrumenting a Node function manually, make sure that logging is enabled, and that your function imports newrelic before it imports any dependencies you expect to monitor. 
If none of these solutions help you, contact our support team. The following information will help you when you talk to support technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? Recommended: Attach your CloudWatch logs to the ticket To provide our support team with logging information when opening a ticket: Invoke the function in AWS Lambda. Click on the logs link after your function runs. This will take you to the CloudWatch logs in AWS. On the left-hand sidebar in AWS, under Logs, click on Insights. Select your function and also the newrelic-log-ingestion stream. Apply an appropriate Time Filter, and a log entry limit (the default of 20 may not be enough). Under Actions select Copy query results (ASCII). Paste the copied text into a new text file, then save and upload the text file to the ticket. Important The NR_LAMBDA_MONITORING payload contains all the information the agent attempts to send up, including metrics, events, some AWS account metadata, invocations and errors data. Note that some of that data (for example, our legacy metrics) will not make it to our UI because our ingest pipeline does not consume them.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 111.578674, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Troubleshoot enabling serverless monitoring of AWS Lambda", + "sections": "Troubleshoot enabling serverless monitoring of AWS Lambda", + "tags": "Serverless function monitoring", + "body": "Problem You're attempting to enable serverless monitoring for AWS Lambda and are having an issue or error. Solution There are two common problems related to not completing all of the enablement procedures: Not seeing data on CloudWatch metrics UI page. This means the AWS integration step wasn't" + }, + "id": "603ea6bb64441f85284e889b" + }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. 
Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 110.14922, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "image": "", "url": "https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/enable-lambda-monitoring/configure-serverless-monitoring-aws-lambda/", @@ -49020,7 +48956,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.72955, + "_score": 67.22069, "_version": null, "_explanation": null, "sort": null, @@ -49054,7 +48990,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 384.0298, + "_score": 360.8441, "_version": null, "_explanation": null, "sort": null, @@ -49095,7 +49031,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 379.47803, + "_score": 351.81073, "_version": null, "_explanation": null, "sort": null, @@ -49135,7 +49071,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 347.05762, + "_score": 326.0235, "_version": null, "_explanation": null, "sort": null, @@ -49180,7 +49116,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 332.5344, + "_score": 312.43033, "_version": null, "_explanation": null, "sort": null, @@ -49216,7 +49152,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.84717, + "_score": 274.4306, "_version": null, "_explanation": null, "sort": null, @@ -49260,7 +49196,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.27251, + "_score": 110.55866, "_version": null, "_explanation": null, "sort": null, @@ -49286,7 +49222,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.86857, + "_score": 66.49602, "_version": null, "_explanation": null, "sort": null, @@ -49324,7 +49260,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.920143, + "_score": 58.871048, "_version": null, "_explanation": null, "sort": null, @@ -49353,7 +49289,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 55.762688, + "_score": 55.720135, "_version": null, "_explanation": null, "sort": null, @@ -49387,7 +49323,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 49.779163, + 
"_score": 49.761604, "_version": null, "_explanation": null, "sort": null, @@ -49422,7 +49358,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1259.9567, + "_score": 1262.2787, "_version": null, "_explanation": null, "sort": null, @@ -49441,7 +49377,7 @@ "Tip", "Important" ], - "published_at": "2022-02-15T01:41:58Z", + "published_at": "2022-02-16T01:44:29Z", "title": "Instrument your cluster", "updated_at": "2021-11-06T01:49:38Z", "type": "developer", @@ -49452,7 +49388,7 @@ "body": "lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. Each procedure in the lab builds upon the last, so make sure you've completed the last procedure, Explore your cluster, before starting this one. As the developer of TinyHat.me, you need to have visibility into your cluster. You need to know how healthy your application is. You need to know when things go wrong. But you've put it off for so long because instrumenting Kubernetes is hard and time-consuming. This is one of the things that makes Pixie so valuable. Pixie is a CNCF open source Kubernetes monitoring solution that provides: Automatic and instant baseline observability of your cluster Actionable, code-level insights of your applications With Pixie's auto-telemetry, you'll instrument your cluster in minutes to get dynamic data such as protocol traces, resource metrics, and app metrics from your cluster—all without an agent! Tip If you haven't signed into New Relic, do that now so you're ready to install Pixie into your cluster. Step 1 of 10 Open New Relic. On the right side of the upper navigation bar, click Add more data: Step 2 of 10 Click Guided install: This walks you through the installation process. Step 3 of 10 Click Kubernetes to let New Relic guide you through instrumenting your Kubernetes cluster: Step 4 of 10 Click Begin installation: Step 5 of 10 Select your account, name your cluster \"tiny-hat\", and click Continue: This specifies that TinyHat.me, and all its services, should live in a New Relic cluster called \"tiny-hat\" in the account you selected. Step 6 of 10 Leave the default choices on the next screen. These provide a range of observability features for your cluster, including our infrastructure agent which gives you a high level overview of the health of your cluster. Notably, the default options include \"Instant service-level insights, full-body requests, and application profiles through Pixie\" which you focus on in this lab. Click Continue: In the next screen, you see a command for installing our Kubernetes integration into your cluster. Step 7 of 10 Click Copy command: Now you're ready to install Pixie into your cluster. Step 8 of 10 Switch back to your terminal and paste the Helm command. Step 9 of 10 While you're installing Pixie, switch back to New Relic and click Continue to progress your guided installation to the final step. Here, you see a message that says \"Listening for data\": In a few minutes, Helm will have fully installed the necessary charts. 
You should see a message with the name, deployed date, namespace, and more: bash Copy NAME: newrelic-bundle LAST DEPLOYED: Thu Sep 23 13:50:24 2021 NAMESPACE: newrelic STATUS: deployed REVISION: 1 TEST SUITE: None Soon after that, the New Relic page updates to tell you that we're receiving data from your cluster: Step 10 of 10 Click Kubernetes cluster explorer to see your nodes, pods, deployments and a host of other data about your cluster, all in one view: Important You may see an error message, \"We're receiving incomplete data for this cluster.\" Please wait a few more minutes and refresh the page to see your cluster. In minutes, you were able to instrument your entire cluster without having to install language-specific agents or specify detailed cluster information! On top of all the data you see in the cluster explorer, click a pod or a node to dig deeper and see the granular data that the infrastructure agent was able to access near-instantly: lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. Now that you've instrumented your cluster, use Pixie to debug your application.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.33723, + "_score": 57.40007, "_version": null, "_explanation": null, "sort": null, @@ -49487,7 +49423,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.995674, + "_score": 126.224014, "_version": null, "_explanation": null, "sort": null, @@ -49533,7 +49469,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.16167, + "_score": 80.60472, "_version": null, "_explanation": null, "sort": null, @@ -49575,7 +49511,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.07054, + "_score": 68.373886, "_version": null, "_explanation": null, "sort": null, @@ -49632,7 +49568,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 60.36673, + "_score": 59.388115, "_version": null, "_explanation": null, "sort": null, @@ -49681,7 +49617,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39962, + "_score": 56.21028, "_version": null, "_explanation": null, "sort": null, @@ -49722,7 +49658,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 137.09021, + "_score": 128.3942, "_version": null, "_explanation": null, "sort": null, @@ -49769,7 +49705,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.84833, + "_score": 106.65842, "_version": null, "_explanation": null, "sort": null, @@ -49814,7 +49750,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 111.01732, + "_score": 104.5747, "_version": null, "_explanation": null, "sort": null, @@ -49823,52 +49759,6 @@ }, "id": "617daf64e7b9d21e06c040d4" }, - { - "sections": [ - "Amazon RDS monitoring integration", - "Important", - "Features", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "RDS cluster metric data", - "RDS instance common metric data", - "RDS cluster instance data", - "RDS stand-alone instance data", - "Inventory data", - "EOL NOTICE", - "Tip", - "aws/rds/cluster", - "aws/rds/instance" - ], - "title": "Amazon RDS monitoring integration", - "type": "docs", - "tags": 
[ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": "7d6fc5eb9cc9f738f85e744f0fd6df72d13a6598", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-rds-monitoring-integration/", - "published_at": "2022-02-14T10:49:31Z", - "updated_at": "2022-02-14T10:49:31Z", - "document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your Amazon Web Services RDS data to New Relic. This document explains how to activate this integration and describes the data that can be reported. New Relic also offers an integration for enhanced RDS monitoring. Features Amazon's Relational Database Service (RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizable capacity for an industry-standard relational database and manages common database administration tasks. New Relic's RDS monitoring integration gathers metric and configuration data for the relational databases associated with your Amazon RDS account. Your RDS data is available in pre-built dashboards and you can also create custom queries and charts in New Relic One. You can create alert conditions for RDS data, and use the reported data to plan for future RDS capacity. Activate integration To enable this integration follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon RDS integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute for RDS instances; 5 minutes for RDS clusters Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select one of the RDS integration links. You can query and explore your data using the DatastoreSample event type, with a provider value of RdsDbInstance for DB instances, or RdsDbCluster for Aurora DB clusters. For more on how to use your data, see Understand and use integration data. Metric data This integration collects Amazon RDS data for clusters and also for instances. Some RDS instance metrics are distinct for cluster instances and stand-alone instances. RDS cluster metric data Name Description volumeBytesUsed Volume bytes used. volumeReadIops Volume read IOPs. volumeWriteIops Volume write IOPs. RDS instance common metric data Metric Description cpuCreditUsage The number of CPU credits consumed during the specified period. cpuCreditBalance The number of CPU credits that an instance has accumulated. cpuUtilization The percentage of CPU used by a DB instance. databaseConnections The number of connections to an instance. diskQueueDepth The number of outstanding read/write requests waiting to access the disk. freeableMemoryBytes The amount of available random access memory, in bytes. maximumUsedTransactionIDs The age of the oldest unvacuumed transaction ID, in transactions. networkReceiveThroughput The amount of network throughput received from clients by each instance in the Aurora MySQL DB cluster, in bytes per second. 
networkTransmitThroughput The amount of network throughput sent to clients by each instance in the Aurora DB cluster, in bytes per second. readLatency The average amount of time taken per disk I/O operation, in seconds. readThroughput The average number of bytes read from disk per second. swapUsageBytes The amount of swap space used on the Aurora PostgreSQL DB instance, in bytes. transactionLogsDiskUsageBytes The amount of disk space occupied by transaction logs on the Aurora PostgreSQL DB instance, in bytes. RDS cluster instance data Metric Description activeTransactions The average number of current transactions executing on an Aurora database instance per second. auroraReplicaLag For an Aurora Replica, the amount of lag when replicating updates from the primary instance, in milliseconds. auroraReplicaLagMaximum The maximum amount of lag between the primary instance and each Aurora DB instance in the DB cluster, in milliseconds. auroraReplicaLagMinimum The minimum amount of lag between the primary instance and each Aurora DB instance in the DB cluster, in milliseconds. backtrackChangeRecordsCreationRate The number of backtrack change records created over five minutes for your DB cluster. backtrackChangeRecordsStored The actual number of backtrack change records used by your DB cluster. backtrackWindowActual The difference between the target backtrack window and the actual backtrack window. backtrackWindowAlert The number of times that the actual backtrack window is smaller than the target backtrack window for a given period of time. blockedTransactions The average number of transactions in the database that are blocked per second. bufferCacheHitRatio The percentage of requests that are served by the buffer cache. commitLatency The amount of latency for commit operations, in milliseconds. commitThroughput The average number of commit operations per second. ddlLatency The amount of latency for data definition language (DDL) requests, in milliseconds—for example, create, alter, and drop requests. ddlThroughput The average number of DDL requests per second. deadlocks The average number of deadlocks in the database per second. deleteLatency The amount of latency for delete queries, in milliseconds. deleteThroughput The average number of delete queries per second. dmlLatency The amount of latency for inserts, updates, and deletes, in milliseconds. dmlThroughput The average number of inserts, updates, and deletes per second. engineUptime The amount of time that the instance has been running, in seconds. failedSqlStatements This metric, displayed under the SQL category of the latest metrics view in the Amazon RDS console, does not apply to Amazon Aurora. freeLocalStorageBytes The amount of storage available for temporary tables and logs, in bytes. insertLatency The amount of latency for insert queries, in milliseconds. insertThroughput The average number of insert queries per second. loginFailures The average number of failed login attempts per second. networkThroughput The amount of network throughput both received from and transmitted to clients by each instance in the Aurora MySQL DB cluster, in bytes per second. queries The average number of queries executed per second. rdsToAuroraPostgreSQLReplicaLag The amount of lag in seconds when replicating updates from the primary RDS PostgreSQL instance to other nodes in the cluster. resultSetCacheHitRatio The percentage of requests that are served by the Resultset cache. selectLatency The amount of latency for select queries, in milliseconds. 
selectThroughput The average number of select queries per second. totalConnections Total number of connections to the database instance. updateLatency The amount of latency for update queries, in milliseconds. updateThroughput The average number of update queries per second. volumeReadIOPs The number of billed read I/O operations from a cluster volume, reported at 5-minute intervals. volumeWriteIOPs The number of write disk I/O operations to the cluster volume, reported at 5-minute intervals. RDS stand-alone instance data Metric Description allocatedStorageBytes Specifies the allocated storage size, in bytes. Note this data doesn't come from an AWS CloudWatch metric, but from the allocatedStorage inventory attribute. binLogDiskUsageBytes The amount of disk space occupied by binary logs on the master, in bytes. diskQueueDepth The number of outstanding read/write requests waiting to access the disk. freeStorageSpaceBytes The amount of storage available for tables and logs, in bytes. maximumUsedTransactionIDs The age of the oldest unvacuumed transaction ID, in transactions. oldestReplicationSlotLag Shows how far behind in seconds the most lagging replica is in terms of WAL data received. readIops The average number of disk I/O operations per second. readLatency The average amount of time taken per disk I/O operation. readThroughput The average number of bytes read from disk per second. replicaLag The amount of lag when replicating updates from the primary instance, in milliseconds. replicationSlotDiskUsage The amount of disk space occupied by replication slots. swapUsageBytes The amount of swap space used on the Aurora PostgreSQL DB instance, in bytes. transactionLogsDiskUsage The amount of disk space occupied by transaction logs on the Aurora PostgreSQL DB instance. transactionLogsGeneration How much storage is being used for WAL data. writeIops The average number of disk I/O operations per second. writeLatency The average amount of time taken per disk I/O operation. writeThroughput The average number of bytes written to disk per second. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. This integration supports the following RDS inventory data. For more about inventory data, see Understand integration data. Tip Tags (indicated with an *) are only fetched when tags collection is on. aws/rds/cluster Name Description allocatedStorage The storage space allocated to this instance, in gibibytes. autoMinorVersionUpgrade Boolean. If true, indicates that minor version patches are applied to this instance automatically. availabilityZone The RDS availability zone for this instance. awsRegion The RDS region for this instance. backupRetentionPeriod Count of the number of days RDS preserves automated backups for this instance. caCertificateIdentifier The CA certificate for this instance. dbInstanceClass The DB instance class for this instance. dbInstancePort Integer representing the port where this instance listens for traffic. If the instance is part of a cluster, this port may not be the port assigned to the overall database cluster. dbInstanceStatus The DB instance status for this instance. dbName The meaning of this configuration option depends on the database engine: Amazon Aurora, MariaDB, MySQL, PostgreSQL, and SQL Server: Indicates the name specified when the instance was created. 
Oracle: Indicates the Oracle System ID (SID) for this instance. dbParameterGroups JSON string that lists DB parameter groups for this instance. dbSecurityGroups JSON string that lists DB security groups for this instance. Only collects data from the DBSecurityGroup.Name and DBSecurityGroup.Status sub-elements. dbSubnetGroup JSON string that lists details about the instance's DB subnet group, including the name and description for each subnet in the subnet group. engine The name of the database engine running on this instance. engineVersion The version identifier for the database engine running on this instance. licenseModel The license identifier for the database engine running on this instance. For example, general-public-license. multiAz Boolean. If true, indicates this instance is a multi-availability-zone deployment. optionGroupMemberships JSON string that lists any option group memberships for this instance. publiclyAccessible Boolean. If true, indicates an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. If false, indicates an internal instance with a DNS name that resolves to a private IP address. secondaryAvailabilityZone If present, indicates the name of the secondary availability zone for a multi-availability-zone deployment. storageEncrypted Boolean. If true, indicates that the database on this instance is encrypted. storageType Indicates the RDS storage type for this instance. vpcSecurityGroups JSON string that lists the VPC security groups for this instance. tags* Cluster tags. aws/rds/instance Name Description awsRegion The AWS region that the instance was provisioned in. dbInstanceIdentifier Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance. allocatedStorage Specifies the allocated storage size, in gibibytes. autoMinorVersionUpgrade Indicates that minor version patches are applied automatically. availabilityZone Specifies the name of the Availability Zone the DB instance is located in. backupRetentionPeriod Specifies the number of days for which automatic DB snapshots are retained. caCertificateIdentifier The identifier of the CA certificate for this DB instance. characterSetName If present, specifies the name of the character set that this instance is associated with. dbClusterIdentifier If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is member of. dbInstanceClass Contains the name of the compute and memory capacity class of the DB instance. dbInstancePort Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port. dbInstanceStatus Specifies the current state of this database. dbName Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance. dbParameterGroups Provides the list of DB parameter groups applied to this DB instance. dbSecurityGroups Provides List of DB security group elements. dbParameterGroups Provides the list of DB parameter groups. dbSubnetGroup Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group. endpoint Specifies the connection endpoint. engine Provides the name of the database engine to be used for this DB instance. 
engineVersion Indicates the database engine version. kmsKeyId If StorageEncrypted is true, the AWS KMS key identifier for the encrypted DB instance. licenseModel License model information for this DB instance. masterUsername Contains the master username for the DB instance. multiAz Specifies if the DB instance is a Multi-AZ deployment. optionGroupMemberships Provides the list of option group memberships for this DB instance. preferredBackupWindow Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod. preferredMaintenanceWindow Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). publiclyAccessible Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address. readReplicaDdInstanceIdentifiers Contains one or more identifiers of the Read Replicas associated with this DB instance. readReplicaSourceDbInstanceIdentifier Contains the identifier of the source DB instance if this DB instance is a Read Replica. secondaryAvailabilityZone If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support. storageEncrypted Specifies whether the DB instance is encrypted. storageType Specifies the storage type associated with DB instance. tdeCredentialArn The ARN from the key store with which the instance is associated for TDE encryption. vpcSecurityGroups Provides a list of VPC security group elements that the DB instance belongs to. clusterInstance If the instance is a cluster or not. tags* Instance tags.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 104.74988, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your Amazon" - }, - "id": "617d6da1196a67bae7f7c325" - }, { "image": "", "url": "https://docs.newrelic.com/whats-new/2021/10/instant-observability-10-13-21/", @@ -49890,7 +49780,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 103.58332, + "_score": 102.71758, "_version": null, "_explanation": null, "sort": null, @@ -49898,6 +49788,51 @@ "body": ". The Gigamon Hawk hybrid-cloud visibility and analytics platform provides access to - and extracts intelligence from all network traffic. The Gigamon quickstart delivers advanced security capabilities that offer network detection and response to advanced threats, including shadow IT activities, crypto-mining and torrent activities, SSL cipher versions and expiration dates across both managed and unmanaged hosts, such as IoT/OT and containers." 
}, "id": "616c0b0f196a671a8c3c9c10" + }, + { + "sections": [ + "Docker container for infrastructure monitoring", + "What you need", + "Custom setup (recommended)", + "Docker CLI", + "Docker Compose", + "Basic setup", + "Required container privileges", + "Next steps after install", + "Inventory collected", + "Container data", + "Containerized agent image", + "Check the source code" + ], + "title": "Docker container for infrastructure monitoring", + "type": "docs", + "tags": [ + "Linux installation", + "Install the infrastructure agent", + "Infrastructure" + ], + "external_id": "022f4fba474d662414d9542a107d4d8a30d24895", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/linux-installation/docker-container-infrastructure-monitoring/", + "published_at": "2022-02-14T09:39:58Z", + "updated_at": "2022-02-04T10:44:25Z", + "document_type": "page", + "popularity": 1, + "body": "The infrastructure monitoring agent for Linux supports Docker environments by default. If you're running a container OS or have restrictions that require deploying the agent as a container, you can run a containerized version of our infrastructure monitoring agent. This can monitor metrics for the container itself, as well as the underlying host. Using the custom (recommended) or basic setup allows the infrastructure agent to run inside a container environment. A host can only run one instance of the agent at a time, whether that's the containerized agent or the non-containerized version. Want to try out our infrastructure monitoring agent for Linux? Create a New Relic account for free! No credit card required. What you need The containerized version of the infrastructure agent requires Docker 1.12 or higher. The container must run any of the Linux distributions and versions supported by our agent. The container image is available and supported on AMD64 and ARM64 architectures. The log forwarder is not included with the containerized agent. We recommend installing the agent on the underlying host which provides all capabilities. Custom setup (recommended) The following are basic instructions for creating a custom Docker image on Linux. This allows you to deploy the infrastructure agent as a container that can monitor its underlying host. Recommendation: Extend the newrelic/infrastructure image, and use your own newrelic-infra.yml agent config file. Once your image is built, you can easily spin up a container without having to provide more launch time configurations. Do not provide secrets using environment variables with Docker. Docker CLI Create the newrelic-infra.yml agent config file with your New Relic license key. For config option explanations, see configuration settings. license_key: YOUR_LICENSE_KEY Copy Create the Dockerfile extending the newrelic/infrastructure image, and add your config to /etc/newrelic-infra.yml: FROM newrelic/infrastructure:latest ADD newrelic-infra.yml /etc/newrelic-infra.yml Copy Build and tag your image: docker build -t YOUR_IMAGE_NAME . Copy Run the container from the image you built with the required required run flags: docker run \\ -d \\ --name newrelic-infra \\ --network=host \\ --cap-add=SYS_PTRACE \\ --privileged \\ --pid=host \\ -v \"/:/host:ro\" \\ -v \"/var/run/docker.sock:/var/run/docker.sock\" \\ YOUR_IMAGE_NAME Copy For potential next steps, like how to see data in the UI, see What's next? 
Docker Compose Create a folder to store the configuration files: mkdir ~/newrelic-infra-setup Copy Change directory to the one you've just created: cd ~/newrelic-infra-setup Copy Create the newrelic-infra.yml agent config file with your New Relic license key. For config option explanations, see configuration settings. echo \"license_key: YOUR_LICENSE_KEY\" > newrelic-infra.yml Copy Create the newrelic-infra.dockerfile extending the newrelic/infrastructure image, and add your config to /etc/newrelic-infra.yml: touch newrelic-infra.dockerfile Copy vim newrelic-infra.dockerfile #you can use any text editor Copy Put the following content in the file: FROM newrelic/infrastructure:latest ADD newrelic-infra.yml /etc/newrelic-infra.yml Copy Create docker-compose.yaml: touch docker-compose.yaml Copy vim docker-compose.yaml #you can use any text editor Copy Put the following content in the file: version: '3' services: agent: container_name: newrelic-infra build: context: . dockerfile: newrelic-infra.dockerfile cap_add: - SYS_PTRACE network_mode: host pid: host privileged: true volumes: - \"/:/host:ro\" - \"/var/run/docker.sock:/var/run/docker.sock\" restart: unless-stopped Copy Build and start docker-compose: docker-compose -f docker-compose.yaml up -d Copy For potential next steps, like how to see data in the UI, see What's next? Basic setup To use the basic setup with a base New Relic infrastructure image: Docker CLI Run the container with the required run flags: docker run \\ -d \\ --name newrelic-infra \\ --network=host \\ --cap-add=SYS_PTRACE \\ --privileged \\ --pid=host \\ -v \"/:/host:ro\" \\ -v \"/var/run/docker.sock:/var/run/docker.sock\" \\ -e NRIA_LICENSE_KEY=YOUR_LICENSE_KEY \\ newrelic/infrastructure:latest Copy For potential next steps, like how to see data in the UI, see What's next? Docker Compose Create docker-compose.yaml: touch docker-compose.yaml Copy vim docker-compose.yaml #you can use any text editor Copy Put the following content in the file: version: '3' services: agent: container_name: newrelic-infra image: newrelic/infrastructure:latest cap_add: - SYS_PTRACE network_mode: host pid: host privileged: true volumes: - \"/:/host:ro\" - \"/var/run/docker.sock:/var/run/docker.sock\" environment: NRIA_LICENSE_KEY: \"YOUR_LICENSE_KEY\" restart: unless-stopped Copy Build and start docker-compose: docker-compose -f docker-compose.yaml up -d Copy For potential next steps, like how to see data in the UI, see What's next? Required container privileges Due to resource isolation from the host and other containers via Linux namespaces, a container has a very restricted view and control of its underlying host's resources by default. Without these extra privileges, the infrastructure agent cannot monitor the host and its containers. The infrastructure agent collects data about its host using system files and system calls. For more information about how the infrastructure agent collects data, see our documentation about infrastructure monitoring and security. Required privileges include: Privilege Description --network=host Sets the container's network namespace to the host's network namespace. This allows the agent to collect the network metrics about the host. -v \"/:/host:ro\" Bind mounts the host's root volume to the container. This read-only access to the host's root allows the agent to collect process and storage metrics as well as Inventory data from the host. --cap-add=SYS_PTRACE Adds the Linux capability to trace system processes. 
This allows the agent to gather data about processes running on the host. Read more here. --privileged --pid=host -v \"/var/run/docker.sock:/var/run/docker.sock\" Bind mounts the host's Docker daemon socket to the container. This allows the agent to connect to the Engine API via the Docker daemon socket to collect the host's container data. Next steps after install For next steps after the install is completed, see What's next? Inventory collected Inventory is collected from the infrastructure agent's built-in data collectors. The infrastructure agent collects this data for Linux systems running with containers. Category Source Data collected using metadata agent_config Agent's complete config file system uptime -s, /etc/redhat-release, /proc/cpuinfo, /etc/os-release, /proc/sys/kernel/random/boot_id, /proc/sys/kernel/osrelease, /sys/class/dmi/id/product_uuid, /sys/devices/virtual/dmi/id/sys_vendor, /sys/devices/virtual/dmi/id/product_name Container data Once the infrastructure agent is running in a Docker container, it can collect the same host compute data and event data that the infrastructure agent is capable of collecting when running natively on a host. For more information, see our documentation about how to view your Docker container data. Containerized agent image The containerized agent image is built from an Alpine base image. Alpine is used as the base image since version 0.0.55. This is the version pointed to by the latest tag. Earlier versions used CentOS 7 as the base image. Check the source code This integration is open source software. You can browse its source code and send improvements, or create your own fork and build it.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 101.570114, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Docker container for infrastructure monitoring", + "sections": "Docker container for infrastructure monitoring", + "tags": "Install the infrastructure agent", + "body": " about infrastructure monitoring and security. Required privileges include: Privilege Description --network=host Sets the container's network namespace to the host's network namespace. This allows the agent to collect the network metrics about the host. 
-v "/:/host:ro" Bind mounts the host's root" + }, + "id": "6043ef6a28ccbce71b2c6062" } ], "/drupal/12c2e734-6da4-4723-b484-e3dcf2e20012": [ @@ -49927,7 +49862,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.57204, + "_score": 95.55783, "_version": null, "_explanation": null, "sort": null, @@ -49969,7 +49904,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.09743, + "_score": 89.10961, "_version": null, "_explanation": null, "sort": null, @@ -50002,7 +49937,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 87.529205, + "_score": 87.58515, "_version": null, "_explanation": null, "sort": null, @@ -50039,7 +49974,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 87.46018, + "_score": 87.50511, "_version": null, "_explanation": null, "sort": null, @@ -50072,7 +50007,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.51562, + "_score": 86.57056, "_version": null, "_explanation": null, "sort": null, @@ -50117,7 +50052,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.80243, + "_score": 62.135056, "_version": null, "_explanation": null, "sort": null, @@ -50146,7 +50081,7 @@ "external_id": "531f2f3985bf64bb0dc92a642445887095048882", "image": "", "url": "https://developer.newrelic.com/automate-workflows/get-started-new-relic-cli/", - "published_at": "2022-02-15T01:37:22Z", + "published_at": "2022-02-16T01:38:29Z", "updated_at": "2021-09-02T01:40:23Z", "document_type": "page", "popularity": 1, @@ -50154,7 +50089,7 @@ "body": "Access the New Relic platform from the comfort of your terminal. You can use the New Relic CLI to manage entity tags, define workloads, record deployment markers, and much more. In short, you can use the CLI to automate common tasks in your DevOps workflow. This guide walks you through the essentials of New Relic CLI, from install and configuration to basic usage. Before you begin For this guide you just need: Your New Relic user key. An instrumented application in your New Relic account. Step 1 of 10 Install the New Relic CLI Download the New Relic CLI for your operating system, as described below. You can also download pre-built binaries for all platforms, including .deb and .rpm packages, and our Windows x64 .msi installer. Linux Using Snapcraft, run: bash Copy $ sudo snap install newrelic-cli macOS Using Homebrew, run: bash Copy $ brew install newrelic-cli Windows Using Scoop, run: bash Copy $ scoop bucket add newrelic-cli https://github.com/newrelic/newrelic-cli.git $ scoop install newrelic-cli Step 2 of 10 Create your New Relic CLI profile. After you install the New Relic CLI, it's time to create your first profile. Profiles contain credentials and settings that you can apply to any CLI command, which is useful when switching between accounts. Run the profiles add command: bash Copy $ # Create the tutorial account for the US region $ newrelic profiles add --profile tutorial --apiKey YOUR_NEW_RELIC_USER_KEY -r YOUR_REGION $ # Set the profile as defaults $ newrelic profiles default --profile tutorial Important You must set the region of your New Relic account. Use -r to set either us or eu. Step 3 of 10 Get your application details. Now, add tags to the application you've instrumented with New Relic. 
Tags are key-value pairs that can help you organize and filter your entities. An entity (for example, an application) can have a maximum of 100 key-value pairs tied to it. Before searching for your application using the New Relic CLI, write down or copy your Account ID and the name of your application in New Relic - you need both to find applications in the New Relic platform. Step 4 of 10 Retrieve your application details as a JSON object. To search for your APM application, use the apm application search command: bash Copy $ newrelic apm application search --accountId YOUR_ACCOUNT_ID --name NAME_OF_YOUR_APP Tip If you get an error, check that your account ID and application name are correct. Step 5 of 10 Find the guid value. If the account ID is valid, and the application name exists in your account, apm application search yields data similar to this example: [ { \"accountId\": YOUR_ACCOUNT_ID, \"applicationId\": YOUR_APP_ID, \"domain\": \"APM\", \"entityType\": \"APM_APPLICATION_ENTITY\", \"guid\": \"A_LONG_GUID\", \"name\": \"NAME_OF_YOUR_APP\", \"permalink\": \"https://one.newrelic.com/redirect/entity/A_LONG_GUID\", \"reporting\": true, \"type\": \"APPLICATION\" } ] Copy When you've successfully searched for your application, look for the guid value. It's a unique identifier for your application. You should copy it or write it down. Step 6 of 10 Add a simple tag to your application. Now that you have the GUID, you can point the New Relic CLI directly at your application. Adding a tag is the simplest way to try out the CLI capabilities (don't worry, tags can be deleted by using entity tags delete). Here, you add an environment tag to your application. Add the devkit:testing tag (or any other key-value pair) to your application using the entities tags create command: bash Copy $ newrelic entity tags create --guid YOUR_APP_GUID --tag devkit:testing Step 7 of 10 Add tag sets. What if you want to add multiple tags? Tag sets to the rescue! While tags are key-value pairs separated by colons, tag sets are comma-separated lists of tags. For example: tag1:value1,tag2:value2 To add multiple tags to your application at once, modify and run this snippet: bash Copy $ newrelic entity tags create --guid YOUR_APP_GUID --tag tag1:test,tag2:test Important Adding tags is an asynchronous operation: this means it could take a while for the tags to get created. Step 8 of 10 Retrieve your application's tags. You've created and added some tags to your application, but to test that they're working, you need to retrieve them. Run the entity tags get command: bash Copy $ newrelic entity tags get --guid YOUR_APP_GUID All tags associated with your application are retrieved as a JSON array: [ { \"Key\": \"tag1\", \"Values\": [\"true\"] }, { \"Key\": \"tag2\", \"Values\": [\"test\"] }, { \"Key\": \"tag3\", \"Values\": [\"testing\"] } // ... ] Copy Step 9 of 10 Bonus step: create a deployment marker. Deployments of applications often go wrong. Deployment markers are labels that, when attached to your application data, help you track deployments and troubleshoot what happened. To create a deployment marker, run the apm deployment create command using the same application ID from your earlier search: bash Copy $ newrelic apm deployment create --applicationId YOUR_APP_ID --revision $(git describe --tags --always) Step 10 of 10 Check the JSON response for the revision and timestamp of the deployment. 
You can build this workflow into a continuous integration or continuous deployment (CI/CD) system to indicate changes in your application's behavior after deployments. Here's an example: { \"id\": 37075986, \"links\": { \"application\": 204261368 }, \"revision\": \"v1.2.4\", \"timestamp\": \"2020-03-04T15:11:44-08:00\", \"user\": \"Developer Toolkit Test Account\" } Copy Next steps Have a look at all the available commands in the New Relic CLI. For example, you can create a New Relic workload using workload create. If you'd like to engage with other community members, visit our New Relic Explorers Hub page. We welcome feature requests or bug reports on GitHub.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 37.163136, + "_score": 37.092064, "_version": null, "_explanation": null, "sort": null, @@ -50165,32 +50100,6 @@ } ], "/golang/87dc92ae-94df-4034-a9fa-d5edfb253e29": [ - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -50222,7 +50131,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.86975, "_version": null, "_explanation": null, "sort": null, @@ -50263,7 +50172,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.86901, "_version": null, "_explanation": null, "sort": null, @@ -50273,6 +50182,32 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 109.96106, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Troubleshoot enabling serverless monitoring of AWS Lambda", @@ -50299,7 +50234,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.00305, + "_score": 82.84302, "_version": null, "_explanation": null, "sort": null, @@ -50337,7 +50272,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 70.97598, + "_score": 70.94992, "_version": null, "_explanation": null, "sort": null, @@ -50351,32 +50286,6 @@ } ], "/httprouter/66433ac5-1908-4af0-b1f8-9c78e5eed8df": [ - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.38019, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -50408,7 +50317,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -50449,7 +50358,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -50459,79 +50368,6 @@ }, "id": "61566dd8e7b9d279cf8de386" }, - { - "sections": [ - "Install the Go agent in GAE flexible environment", - "Important", - "Build a custom runtime using Docker", - "1. Set up the GAE project and install dependencies", - "2. Configure your app.yaml", - "3. Configure a Dockerfile", - "4. Build a Docker image", - "5. Deploy Docker image to initialized GAE flexible environment", - "Recommendation: Disable health checks", - "Get agent troubleshooting logs from GAE" - ], - "title": "Install the Go agent in GAE flexible environment", - "type": "docs", - "tags": [ - "Installation", - "Go agent", - "Agents" - ], - "external_id": "45635e5ca8f209b31de810abc207ae9926c03274", - "image": "", - "url": "https://docs.newrelic.com/docs/apm/agents/go-agent/installation/install-go-agent-gae-flexible-environment/", - "published_at": "2022-02-15T17:32:59Z", - "updated_at": "2021-10-23T20:23:54Z", - "document_type": "page", - "popularity": 1, - "body": "With our Go agent, you can monitor applications that reside in the Google App Engine (GAE) flexible environment. Adding New Relic to your GAE flex app gives you insight into the health and performance of your app and extends GAE with metrics you can view in APM and browser monitoring. Here we explain how to add New Relic to your GAE flex app by configuring a custom runtime, and give an example of deploying a Go app with Docker. Important The Go agent can run in a GAE flexible environment using a custom runtime. Due to limitations of other environments, do not use the GAE standard environment or Google App Engine's \"native mode\" installation. Build a custom runtime using Docker See Google's documentation for building custom runtimes. This example describes how to add New Relic to your GAE flex app by installing the Go agent, building a custom Go runtime for Docker, and deploying a golang application. For more information about deploying and configuring your Go app in the GAE flexible environment, see: Google App Engine's documentation for Go Google App Engine's tutorials to deploy a Go app 1. Set up the GAE project and install dependencies Follow standard procedures to install the Go agent for your specific app server, including your license key. Follow Google App Engine procedures for Go to create a new Cloud Platform project, create an App Engine application, download and install git, and complete other prerequisites for the Google Cloud SDK. The Google Cloud SDK provides the gcloud command line tool to manage and deploy GAE apps. 2. Configure your app.yaml The app.yaml configuration file is required for a GAE flexible environment app with a custom runtime. At a minimum, make sure it contains: runtime: custom env: flex Copy 3. 
Configure a Dockerfile The Dockerfile defines the Docker image to be built and is required for a GAE flexible environment app. The following Dockerfile example code defines the golang version used. FROM golang:1.8-onbuild CMD go run main.go Copy 4. Build a Docker image To build the Docker image, run the following command. Be sure to include the period at the end of the code, to indicate the current directory contains the build files. docker build --rm -t Docker-image-name . Copy 5. Deploy Docker image to initialized GAE flexible environment To deploy your Docker image to your initialized GAE flexible environment, run the following command: gcloud --project go-app-name app deploy Copy Wait until the deployment completes. To view your GAE flex app data in New Relic, go to the APM Summary page. Recommendation: Disable health checks Google App Engine sends periodic health check requests to confirm that an instance has been successfully deployed, and to check that a running instance maintains a healthy status. A health check is an HTTP request to the URL /_ah/health. If you create a custom runtime, your app must be able to handle a large number of health check requests. Otherwise, your app data may not display correctly in APM. Recommendation: Configure your app.yaml to disable health checks by adding: health_check: enable_health_check: False Copy Get agent troubleshooting logs from GAE Use these resources to troubleshoot your GAE flex environment app: To connect to the GAE instance and start a shell in the Docker container running your code, see GAE's documentation for debugging an instance. To redirect New Relic Go agent logs to Stackdriver in the Cloud Platform Console, change the newrelic.yml file to: log_file_name: STDOUT Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.33989, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": "With our Go agent, you can monitor applications that reside in the Google App Engine (GAE) flexible environment. Adding New Relic to your GAE flex app gives you insight into the health and performance of your app and extends GAE with metrics you can view in APM and browser monitoring. Here we" - }, - "id": "617e6e9ee7b9d27090c06297" - }, - { - "sections": [ - "New Relic Infrastructure licenses" - ], - "title": "New Relic Infrastructure licenses", - "type": "docs", - "tags": [ - "New Relic Infrastructure", - "Product or service licenses", - "Licenses" - ], - "external_id": "a3dabb2eb2d680f1261751bba22a49549635db9f", - "image": "", - "url": "https://docs.newrelic.com/docs/licenses/product-or-service-licenses/new-relic-infrastructure/new-relic-infrastructure-licenses/", - "published_at": "2022-02-14T18:53:51Z", - "updated_at": "2021-03-13T03:34:37Z", - "document_type": "page", - "popularity": 1, - "body": "We love open-source software, and use the following in New Relic Infrastructure. Thank you, open-source community, for making these fine tools! Some of these are listed under multiple software licenses, and in that case we have listed the license we've chosen to use. 
Product Licenses Sirupsen/logrus MIT davecgh/go-spew/spew ISC docker/distribution/digestset Apache-2.0 docker/distribution/reference Apache-2.0 fluent/fluent-bit Apache-2.0 moby/moby/api Apache-2.0 moby/moby/api/types Apache-2.0 moby/moby/api/types/blkiodev Apache-2.0 moby/moby/api/types/container Apache-2.0 moby/moby/api/types/events Apache-2.0 moby/moby/api/types/filters Apache-2.0 moby/moby/api/types/image Apache-2.0 moby/moby/api/types/mount Apache-2.0 moby/moby/api/types/network Apache-2.0 moby/moby/api/types/registry Apache-2.0 moby/moby/api/types/strslice Apache-2.0 moby/moby/api/types/swarm Apache-2.0 moby/moby/api/types/time Apache-2.0 moby/moby/api/types/versions Apache-2.0 moby/moby/api/types/volume Apache-2.0 moby/moby/client Apache-2.0 moby/moby/pkg/tlsconfig Apache-2.0 docker/go-connections/nat Apache-2.0 docker/go-connections/sockets Apache-2.0 docker/go-connections/tlsconfig Apache-2.0 docker/go-units Apache-2.0 evanphx/json-patch BSD-3-Clause ghodss/yaml MIT julienschmidt/httprouter BSD-3-Clause kardianos/service Zlib kolo/xmlrpc MIT opencontainers/go-digest Apache-2.0 opsmatic/canstop MIT opsmatic/fsnotify BSD-3-Clause pkg/errors BSD-2-Clause pmezard/go-difflib/difflib BSD-3-Clause shirou/gopsutil/cpu BSD-3-Clause shirou/gopsutil/disk BSD-3-Clause shirou/gopsutil/internal/common BSD-3-Clause shirou/gopsutil/load BSD-3-Clause shirou/gopsutil/mem BSD-3-Clause shirou/gopsutil/net BSD-3-Clause shirou/gopsutil/process BSD-3-Clause stretchr/objx MIT stretchr/testify/assert MIT stretchr/testify/mock MIT stretchr/testify/require MIT golang.org/x/crypto/ssh/terminal BSD-3-Clause golang.org/x/net/context BSD-3-Clause golang.org/x/net/context/ctxhttp BSD-3-Clause golang.org/x/net/internal/socks BSD-3-Clause golang.org/x/net/proxy BSD-3-Clause golang.org/x/sys/unix BSD-3-Clause gopkg.in/check.v1 BSD-2-Clause gopkg.in/yaml.v2 Apache-2.0 garyburd/redigo/internal Apache-2.0 garyburd/redigo/redis Apache-2.0 go-sql-driver/mysql MPL-2.0 Shopify/sarama MIT eapache/go-resiliency/breaker MIT eapache/go-xerial-snappy MIT eapache/queue MIT golang/snappy BSD-3-Clause pierrec/lz4 BSD-3-Clause pierrec/lz4/internal/xxh32 BSD-3-Clause rcrowley/go-metrics BSD-2-Clause-FreeBSD samuel/go-zookeeper/zk BSD-3-Clause gopkg.in/goracle.v2 Apache-2.0 globalsign/mgo BSD-2-Clause globalsign/mgo/bson BSD-2-Clause globalsign/mgo/internal/json BSD-3-Clause globalsign/mgo/internal/scram BSD-2-Clause cloud.google.com/go/civil Apache-2.0 denisenkom/go-mssqldb BSD-3-Clause denisenkom/go-mssqldb/internal/cp BSD-3-Clause jmoiron/sqlx MIT jmoiron/sqlx/reflectx MIT golang.org/x/crypto/md4 BSD-3-Clause gopkg.in/DATA-DOG/go-sqlmock.v1 BSD-3-Clause blang/semver MIT lib/pq MIT lib/pq/oid MIT memcachier/mc MIT mitchellh/mapstructure MIT armon/go-metrics MIT hashicorp/consul/api MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/golang-lru/simplelru MPL-2.0 hashicorp/serf/coordinate MPL-2.0 mitchellh/go-homedir MIT cloud.google.com/go/compute/metadata Apache-2.0 davecgh/go-spew/spew ISC docker/spdystream Apache-2.0 docker/spdystream/spdy Apache-2.0 ghodss/yaml MIT gogo/protobuf/proto BSD-3-Clause gogo/protobuf/sortkeys BSD-3-Clause golang/glog Apache-2.0 golang/protobuf/proto BSD-3-Clause golang/protobuf/ptypes BSD-3-Clause golang/protobuf/ptypes/any BSD-3-Clause golang/protobuf/ptypes/duration BSD-3-Clause golang/protobuf/ptypes/timestamp BSD-3-Clause google/gofuzz Apache-2.0 googleapis/gnostic/OpenAPIv2 Apache-2.0 googleapis/gnostic/compiler Apache-2.0 
googleapis/gnostic/extensions Apache-2.0 howeyc/gopass ISC imdario/mergo BSD-3-Clause json-iterator/go MIT matttproud/golang_protobuf_extensions/pbutil Apache-2.0 pkg/errors BSD-2-Clause pmezard/go-difflib/difflib BSD-3-Clause prometheus/client_model/go Apache-2.0 prometheus/common/expfmt Apache-2.0 prometheus/common/internal/bitbucket.org/ww/goautoneg Apache-2.0 prometheus/common/model Apache-2.0 sirupsen/logrus MIT spf13/pflag BSD-3-Clause stretchr/objx MIT stretchr/testify/assert MIT stretchr/testify/mock MIT xeipuuv/gojsonpointer Apache-2.0 xeipuuv/gojsonreference Apache-2.0 xeipuuv/gojsonschema Apache-2.0 golang.org/x/crypto/ssh/terminal BSD-3-Clause golang.org/x/net/context BSD-3-Clause golang.org/x/net/context/ctxhttp BSD-3-Clause golang.org/x/net/http2 BSD-3-Clause golang.org/x/net/http2/hpack BSD-3-Clause golang.org/x/net/idna BSD-3-Clause golang.org/x/net/lex/httplex BSD-3-Clause golang.org/x/oauth2 BSD-3-Clause golang.org/x/oauth2/google BSD-3-Clause golang.org/x/oauth2/internal BSD-3-Clause golang.org/x/oauth2/jws BSD-3-Clause golang.org/x/oauth2/jwt BSD-3-Clause golang.org/x/sys/unix BSD-3-Clause golang.org/x/text/secure/bidirule BSD-3-Clause golang.org/x/text/transform BSD-3-Clause golang.org/x/text/unicode/bidi BSD-3-Clause golang.org/x/text/unicode/norm BSD-3-Clause golang.org/x/time/rate BSD-3-Clause gopkg.in/inf.v0 BSD-3-Clause gopkg.in/yaml.v2 Apache-2.0 k8s.io/api/admissionregistration/v1alpha1 Apache-2.0 k8s.io/api/admissionregistration/v1beta1 Apache-2.0 k8s.io/api/apps/v1 Apache-2.0 k8s.io/api/apps/v1beta1 Apache-2.0 k8s.io/api/apps/v1beta2 Apache-2.0 k8s.io/api/authentication/v1 Apache-2.0 k8s.io/api/authentication/v1beta1 Apache-2.0 k8s.io/api/authorization/v1 Apache-2.0 k8s.io/api/authorization/v1beta1 Apache-2.0 k8s.io/api/autoscaling/v1 Apache-2.0 k8s.io/api/autoscaling/v2beta1 Apache-2.0 k8s.io/api/batch/v1 Apache-2.0 k8s.io/api/batch/v1beta1 Apache-2.0 k8s.io/api/batch/v2alpha1 Apache-2.0 k8s.io/api/certificates/v1beta1 Apache-2.0 k8s.io/api/core/v1 Apache-2.0 k8s.io/api/events/v1beta1 Apache-2.0 k8s.io/api/extensions/v1beta1 Apache-2.0 k8s.io/api/networking/v1 Apache-2.0 k8s.io/api/policy/v1beta1 Apache-2.0 k8s.io/api/rbac/v1 Apache-2.0 k8s.io/api/rbac/v1alpha1 Apache-2.0 k8s.io/api/rbac/v1beta1 Apache-2.0 k8s.io/api/scheduling/v1alpha1 Apache-2.0 k8s.io/api/settings/v1alpha1 Apache-2.0 k8s.io/api/storage/v1 Apache-2.0 k8s.io/api/storage/v1alpha1 Apache-2.0 k8s.io/api/storage/v1beta1 Apache-2.0 k8s.io/apimachinery/pkg/api/errors Apache-2.0 k8s.io/apimachinery/pkg/api/meta Apache-2.0 k8s.io/apimachinery/pkg/api/resource Apache-2.0 k8s.io/apimachinery/pkg/apis/meta/v1 Apache-2.0 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured Apache-2.0 k8s.io/apimachinery/pkg/apis/meta/v1beta1 Apache-2.0 k8s.io/apimachinery/pkg/conversion Apache-2.0 k8s.io/apimachinery/pkg/conversion/queryparams Apache-2.0 k8s.io/apimachinery/pkg/fields Apache-2.0 k8s.io/apimachinery/pkg/labels Apache-2.0 k8s.io/apimachinery/pkg/runtime Apache-2.0 k8s.io/apimachinery/pkg/runtime/schema Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/json Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/protobuf Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/recognizer Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/streaming Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/versioning Apache-2.0 k8s.io/apimachinery/pkg/selection Apache-2.0 k8s.io/apimachinery/pkg/types Apache-2.0 k8s.io/apimachinery/pkg/util/clock Apache-2.0 
k8s.io/apimachinery/pkg/util/errors Apache-2.0 k8s.io/apimachinery/pkg/util/framer Apache-2.0 k8s.io/apimachinery/pkg/util/httpstream Apache-2.0 k8s.io/apimachinery/pkg/util/httpstream/spdy Apache-2.0 k8s.io/apimachinery/pkg/util/intstr Apache-2.0 k8s.io/apimachinery/pkg/util/json Apache-2.0 k8s.io/apimachinery/pkg/util/net Apache-2.0 k8s.io/apimachinery/pkg/util/remotecommand Apache-2.0 k8s.io/apimachinery/pkg/util/runtime Apache-2.0 k8s.io/apimachinery/pkg/util/sets Apache-2.0 k8s.io/apimachinery/pkg/util/validation Apache-2.0 k8s.io/apimachinery/pkg/util/validation/field Apache-2.0 k8s.io/apimachinery/pkg/util/wait Apache-2.0 k8s.io/apimachinery/pkg/util/yaml Apache-2.0 k8s.io/apimachinery/pkg/version Apache-2.0 k8s.io/apimachinery/pkg/watch Apache-2.0 k8s.io/apimachinery/third_party/forked/golang/netutil Apache-2.0 k8s.io/apimachinery/third_party/forked/golang/reflect Apache-2.0 k8s.io/client-go/discovery Apache-2.0 k8s.io/client-go/kubernetes Apache-2.0 k8s.io/client-go/kubernetes/scheme Apache-2.0 k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/apps/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/apps/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/apps/v1beta2 Apache-2.0 k8s.io/client-go/kubernetes/typed/authentication/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/authentication/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/authorization/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/authorization/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/autoscaling/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/batch/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/batch/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/batch/v2alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/core/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/events/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/networking/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/policy/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/rbac/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/settings/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/storage/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/storage/v1beta1 Apache-2.0 k8s.io/client-go/pkg/apis/clientauthentication Apache-2.0 k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 Apache-2.0 k8s.io/client-go/pkg/version Apache-2.0 k8s.io/client-go/plugin/pkg/client/auth/exec Apache-2.0 k8s.io/client-go/rest Apache-2.0 k8s.io/client-go/rest/watch Apache-2.0 k8s.io/client-go/third_party/forked/golang/template Apache-2.0 k8s.io/client-go/tools/auth Apache-2.0 k8s.io/client-go/tools/clientcmd Apache-2.0 k8s.io/client-go/tools/clientcmd/api Apache-2.0 k8s.io/client-go/tools/clientcmd/api/latest Apache-2.0 k8s.io/client-go/tools/clientcmd/api/v1 Apache-2.0 k8s.io/client-go/tools/metrics Apache-2.0 k8s.io/client-go/tools/reference Apache-2.0 k8s.io/client-go/tools/remotecommand Apache-2.0 k8s.io/client-go/transport Apache-2.0 k8s.io/client-go/transport/spdy Apache-2.0 
k8s.io/client-go/util/cert Apache-2.0 k8s.io/client-go/util/exec Apache-2.0 k8s.io/client-go/util/flowcontrol Apache-2.0 k8s.io/client-go/util/homedir Apache-2.0 k8s.io/client-go/util/integer Apache-2.0 k8s.io/client-go/util/jsonpath Apache-2.0 k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1 Apache-2.0", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.958164, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": "/mock MIT stretchr/testify/require MIT golang.org/x/crypto/ssh/terminal BSD-3-Clause golang.org/x/net/context BSD-3-Clause golang.org/x/net/context/ctxhttp BSD-3-Clause golang.org/x/net/internal/socks BSD-3-Clause golang.org/x/net/proxy BSD-3-Clause golang.org/x/sys/unix BSD-3-Clause gopkg.in" - }, - "id": "60452840196a679fc3960f7e" - } - ], - "/gin/d0dee9c1-b67b-427d-bb85-303cc5dab2a8": [ { "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", @@ -50549,7 +50385,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.380035, + "_score": 110.14922, "_version": null, "_explanation": null, "sort": null, @@ -50558,6 +50394,79 @@ }, "id": "61d8b6a664441fbe9700cc16" }, + { + "sections": [ + "Install the Go agent in GAE flexible environment", + "Important", + "Build a custom runtime using Docker", + "1. Set up the GAE project and install dependencies", + "2. Configure your app.yaml", + "3. Configure a Dockerfile", + "4. Build a Docker image", + "5. Deploy Docker image to initialized GAE flexible environment", + "Recommendation: Disable health checks", + "Get agent troubleshooting logs from GAE" + ], + "title": "Install the Go agent in GAE flexible environment", + "type": "docs", + "tags": [ + "Installation", + "Go agent", + "Agents" + ], + "external_id": "45635e5ca8f209b31de810abc207ae9926c03274", + "image": "", + "url": "https://docs.newrelic.com/docs/apm/agents/go-agent/installation/install-go-agent-gae-flexible-environment/", + "published_at": "2022-02-15T17:32:59Z", + "updated_at": "2021-10-23T20:23:54Z", + "document_type": "page", + "popularity": 1, + "body": "With our Go agent, you can monitor applications that reside in the Google App Engine (GAE) flexible environment. Adding New Relic to your GAE flex app gives you insight into the health and performance of your app and extends GAE with metrics you can view in APM and browser monitoring. Here we explain how to add New Relic to your GAE flex app by configuring a custom runtime, and give an example of deploying a Go app with Docker. Important The Go agent can run in a GAE flexible environment using a custom runtime. Due to limitations of other environments, do not use the GAE standard environment or Google App Engine's \"native mode\" installation. Build a custom runtime using Docker See Google's documentation for building custom runtimes. This example describes how to add New Relic to your GAE flex app by installing the Go agent, building a custom Go runtime for Docker, and deploying a golang application. For more information about deploying and configuring your Go app in the GAE flexible environment, see: Google App Engine's documentation for Go Google App Engine's tutorials to deploy a Go app 1. 
Set up the GAE project and install dependencies Follow standard procedures to install the Go agent for your specific app server, including your license key. Follow Google App Engine procedures for Go to create a new Cloud Platform project, create an App Engine application, download and install git, and complete other prerequisites for the Google Cloud SDK. The Google Cloud SDK provides the gcloud command line tool to manage and deploy GAE apps. 2. Configure your app.yaml The app.yaml configuration file is required for a GAE flexible environment app with a custom runtime. At a minimum, make sure it contains: runtime: custom env: flex 3. Configure a Dockerfile The Dockerfile defines the Docker image to be built and is required for a GAE flexible environment app. The following Dockerfile example code defines the golang version used. FROM golang:1.8-onbuild CMD go run main.go 4. Build a Docker image To build the Docker image, run the following command. Be sure to include the period at the end of the code, to indicate the current directory contains the build files. docker build --rm -t Docker-image-name . 5. Deploy Docker image to initialized GAE flexible environment To deploy your Docker image to your initialized GAE flexible environment, run the following command: gcloud --project go-app-name app deploy Wait until the deployment completes. To view your GAE flex app data in New Relic, go to the APM Summary page. Recommendation: Disable health checks Google App Engine sends periodic health check requests to confirm that an instance has been successfully deployed, and to check that a running instance maintains a healthy status. A health check is an HTTP request to the URL /_ah/health. If you create a custom runtime, your app must be able to handle a large number of health check requests. Otherwise, your app data may not display correctly in APM. Recommendation: Configure your app.yaml to disable health checks by adding: health_check: enable_health_check: False Get agent troubleshooting logs from GAE Use these resources to troubleshoot your GAE flex environment app: To connect to the GAE instance and start a shell in the Docker container running your code, see GAE's documentation for debugging an instance. To redirect New Relic Go agent logs to Stackdriver in the Cloud Platform Console, change the newrelic.yml file to: log_file_name: STDOUT", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 65.31719, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": "With our Go agent, you can monitor applications that reside in the Google App Engine (GAE) flexible environment. Adding New Relic to your GAE flex app gives you insight into the health and performance of your app and extends GAE with metrics you can view in APM and browser monitoring. 
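The Dockerfile above starts the app with go run main.go, but main.go itself is not shown on this page. A minimal sketch of what it might contain, assuming the go-agent v3 API: the app name, the NEW_RELIC_LICENSE_KEY environment variable, and the handler bodies are illustrative assumptions, and using v3 implies a newer Go toolchain than the golang:1.8-onbuild image in the example.

```go
// main.go -- hypothetical entry point started by the Dockerfile's CMD.
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/newrelic/go-agent/v3/newrelic"
)

func main() {
	// Create the agent. The app name and env var are assumptions,
	// not values taken from this page.
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("my-gae-flex-app"),
		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Answer GAE's periodic health checks cheaply if you leave them
	// enabled; the page notes they arrive at /_ah/health in volume.
	http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})

	// WrapHandleFunc records one transaction per request on this route.
	http.HandleFunc(newrelic.WrapHandleFunc(app, "/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hello from GAE flex")
	}))

	// GAE flex routes inbound traffic to port 8080 by default.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```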
Here we" + }, + "id": "617e6e9ee7b9d27090c06297" + }, + { + "sections": [ + "New Relic Infrastructure licenses" + ], + "title": "New Relic Infrastructure licenses", + "type": "docs", + "tags": [ + "New Relic Infrastructure", + "Product or service licenses", + "Licenses" + ], + "external_id": "a3dabb2eb2d680f1261751bba22a49549635db9f", + "image": "", + "url": "https://docs.newrelic.com/docs/licenses/product-or-service-licenses/new-relic-infrastructure/new-relic-infrastructure-licenses/", + "published_at": "2022-02-14T18:53:51Z", + "updated_at": "2021-03-13T03:34:37Z", + "document_type": "page", + "popularity": 1, + "body": "We love open-source software, and use the following in New Relic Infrastructure. Thank you, open-source community, for making these fine tools! Some of these are listed under multiple software licenses, and in that case we have listed the license we've chosen to use. Product Licenses Sirupsen/logrus MIT davecgh/go-spew/spew ISC docker/distribution/digestset Apache-2.0 docker/distribution/reference Apache-2.0 fluent/fluent-bit Apache-2.0 moby/moby/api Apache-2.0 moby/moby/api/types Apache-2.0 moby/moby/api/types/blkiodev Apache-2.0 moby/moby/api/types/container Apache-2.0 moby/moby/api/types/events Apache-2.0 moby/moby/api/types/filters Apache-2.0 moby/moby/api/types/image Apache-2.0 moby/moby/api/types/mount Apache-2.0 moby/moby/api/types/network Apache-2.0 moby/moby/api/types/registry Apache-2.0 moby/moby/api/types/strslice Apache-2.0 moby/moby/api/types/swarm Apache-2.0 moby/moby/api/types/time Apache-2.0 moby/moby/api/types/versions Apache-2.0 moby/moby/api/types/volume Apache-2.0 moby/moby/client Apache-2.0 moby/moby/pkg/tlsconfig Apache-2.0 docker/go-connections/nat Apache-2.0 docker/go-connections/sockets Apache-2.0 docker/go-connections/tlsconfig Apache-2.0 docker/go-units Apache-2.0 evanphx/json-patch BSD-3-Clause ghodss/yaml MIT julienschmidt/httprouter BSD-3-Clause kardianos/service Zlib kolo/xmlrpc MIT opencontainers/go-digest Apache-2.0 opsmatic/canstop MIT opsmatic/fsnotify BSD-3-Clause pkg/errors BSD-2-Clause pmezard/go-difflib/difflib BSD-3-Clause shirou/gopsutil/cpu BSD-3-Clause shirou/gopsutil/disk BSD-3-Clause shirou/gopsutil/internal/common BSD-3-Clause shirou/gopsutil/load BSD-3-Clause shirou/gopsutil/mem BSD-3-Clause shirou/gopsutil/net BSD-3-Clause shirou/gopsutil/process BSD-3-Clause stretchr/objx MIT stretchr/testify/assert MIT stretchr/testify/mock MIT stretchr/testify/require MIT golang.org/x/crypto/ssh/terminal BSD-3-Clause golang.org/x/net/context BSD-3-Clause golang.org/x/net/context/ctxhttp BSD-3-Clause golang.org/x/net/internal/socks BSD-3-Clause golang.org/x/net/proxy BSD-3-Clause golang.org/x/sys/unix BSD-3-Clause gopkg.in/check.v1 BSD-2-Clause gopkg.in/yaml.v2 Apache-2.0 garyburd/redigo/internal Apache-2.0 garyburd/redigo/redis Apache-2.0 go-sql-driver/mysql MPL-2.0 Shopify/sarama MIT eapache/go-resiliency/breaker MIT eapache/go-xerial-snappy MIT eapache/queue MIT golang/snappy BSD-3-Clause pierrec/lz4 BSD-3-Clause pierrec/lz4/internal/xxh32 BSD-3-Clause rcrowley/go-metrics BSD-2-Clause-FreeBSD samuel/go-zookeeper/zk BSD-3-Clause gopkg.in/goracle.v2 Apache-2.0 globalsign/mgo BSD-2-Clause globalsign/mgo/bson BSD-2-Clause globalsign/mgo/internal/json BSD-3-Clause globalsign/mgo/internal/scram BSD-2-Clause cloud.google.com/go/civil Apache-2.0 denisenkom/go-mssqldb BSD-3-Clause denisenkom/go-mssqldb/internal/cp BSD-3-Clause jmoiron/sqlx MIT jmoiron/sqlx/reflectx MIT golang.org/x/crypto/md4 BSD-3-Clause gopkg.in/DATA-DOG/go-sqlmock.v1 
BSD-3-Clause blang/semver MIT lib/pq MIT lib/pq/oid MIT memcachier/mc MIT mitchellh/mapstructure MIT armon/go-metrics MIT hashicorp/consul/api MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/golang-lru/simplelru MPL-2.0 hashicorp/serf/coordinate MPL-2.0 mitchellh/go-homedir MIT cloud.google.com/go/compute/metadata Apache-2.0 davecgh/go-spew/spew ISC docker/spdystream Apache-2.0 docker/spdystream/spdy Apache-2.0 ghodss/yaml MIT gogo/protobuf/proto BSD-3-Clause gogo/protobuf/sortkeys BSD-3-Clause golang/glog Apache-2.0 golang/protobuf/proto BSD-3-Clause golang/protobuf/ptypes BSD-3-Clause golang/protobuf/ptypes/any BSD-3-Clause golang/protobuf/ptypes/duration BSD-3-Clause golang/protobuf/ptypes/timestamp BSD-3-Clause google/gofuzz Apache-2.0 googleapis/gnostic/OpenAPIv2 Apache-2.0 googleapis/gnostic/compiler Apache-2.0 googleapis/gnostic/extensions Apache-2.0 howeyc/gopass ISC imdario/mergo BSD-3-Clause json-iterator/go MIT matttproud/golang_protobuf_extensions/pbutil Apache-2.0 pkg/errors BSD-2-Clause pmezard/go-difflib/difflib BSD-3-Clause prometheus/client_model/go Apache-2.0 prometheus/common/expfmt Apache-2.0 prometheus/common/internal/bitbucket.org/ww/goautoneg Apache-2.0 prometheus/common/model Apache-2.0 sirupsen/logrus MIT spf13/pflag BSD-3-Clause stretchr/objx MIT stretchr/testify/assert MIT stretchr/testify/mock MIT xeipuuv/gojsonpointer Apache-2.0 xeipuuv/gojsonreference Apache-2.0 xeipuuv/gojsonschema Apache-2.0 golang.org/x/crypto/ssh/terminal BSD-3-Clause golang.org/x/net/context BSD-3-Clause golang.org/x/net/context/ctxhttp BSD-3-Clause golang.org/x/net/http2 BSD-3-Clause golang.org/x/net/http2/hpack BSD-3-Clause golang.org/x/net/idna BSD-3-Clause golang.org/x/net/lex/httplex BSD-3-Clause golang.org/x/oauth2 BSD-3-Clause golang.org/x/oauth2/google BSD-3-Clause golang.org/x/oauth2/internal BSD-3-Clause golang.org/x/oauth2/jws BSD-3-Clause golang.org/x/oauth2/jwt BSD-3-Clause golang.org/x/sys/unix BSD-3-Clause golang.org/x/text/secure/bidirule BSD-3-Clause golang.org/x/text/transform BSD-3-Clause golang.org/x/text/unicode/bidi BSD-3-Clause golang.org/x/text/unicode/norm BSD-3-Clause golang.org/x/time/rate BSD-3-Clause gopkg.in/inf.v0 BSD-3-Clause gopkg.in/yaml.v2 Apache-2.0 k8s.io/api/admissionregistration/v1alpha1 Apache-2.0 k8s.io/api/admissionregistration/v1beta1 Apache-2.0 k8s.io/api/apps/v1 Apache-2.0 k8s.io/api/apps/v1beta1 Apache-2.0 k8s.io/api/apps/v1beta2 Apache-2.0 k8s.io/api/authentication/v1 Apache-2.0 k8s.io/api/authentication/v1beta1 Apache-2.0 k8s.io/api/authorization/v1 Apache-2.0 k8s.io/api/authorization/v1beta1 Apache-2.0 k8s.io/api/autoscaling/v1 Apache-2.0 k8s.io/api/autoscaling/v2beta1 Apache-2.0 k8s.io/api/batch/v1 Apache-2.0 k8s.io/api/batch/v1beta1 Apache-2.0 k8s.io/api/batch/v2alpha1 Apache-2.0 k8s.io/api/certificates/v1beta1 Apache-2.0 k8s.io/api/core/v1 Apache-2.0 k8s.io/api/events/v1beta1 Apache-2.0 k8s.io/api/extensions/v1beta1 Apache-2.0 k8s.io/api/networking/v1 Apache-2.0 k8s.io/api/policy/v1beta1 Apache-2.0 k8s.io/api/rbac/v1 Apache-2.0 k8s.io/api/rbac/v1alpha1 Apache-2.0 k8s.io/api/rbac/v1beta1 Apache-2.0 k8s.io/api/scheduling/v1alpha1 Apache-2.0 k8s.io/api/settings/v1alpha1 Apache-2.0 k8s.io/api/storage/v1 Apache-2.0 k8s.io/api/storage/v1alpha1 Apache-2.0 k8s.io/api/storage/v1beta1 Apache-2.0 k8s.io/apimachinery/pkg/api/errors Apache-2.0 k8s.io/apimachinery/pkg/api/meta Apache-2.0 k8s.io/apimachinery/pkg/api/resource Apache-2.0 k8s.io/apimachinery/pkg/apis/meta/v1 Apache-2.0 
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured Apache-2.0 k8s.io/apimachinery/pkg/apis/meta/v1beta1 Apache-2.0 k8s.io/apimachinery/pkg/conversion Apache-2.0 k8s.io/apimachinery/pkg/conversion/queryparams Apache-2.0 k8s.io/apimachinery/pkg/fields Apache-2.0 k8s.io/apimachinery/pkg/labels Apache-2.0 k8s.io/apimachinery/pkg/runtime Apache-2.0 k8s.io/apimachinery/pkg/runtime/schema Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/json Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/protobuf Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/recognizer Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/streaming Apache-2.0 k8s.io/apimachinery/pkg/runtime/serializer/versioning Apache-2.0 k8s.io/apimachinery/pkg/selection Apache-2.0 k8s.io/apimachinery/pkg/types Apache-2.0 k8s.io/apimachinery/pkg/util/clock Apache-2.0 k8s.io/apimachinery/pkg/util/errors Apache-2.0 k8s.io/apimachinery/pkg/util/framer Apache-2.0 k8s.io/apimachinery/pkg/util/httpstream Apache-2.0 k8s.io/apimachinery/pkg/util/httpstream/spdy Apache-2.0 k8s.io/apimachinery/pkg/util/intstr Apache-2.0 k8s.io/apimachinery/pkg/util/json Apache-2.0 k8s.io/apimachinery/pkg/util/net Apache-2.0 k8s.io/apimachinery/pkg/util/remotecommand Apache-2.0 k8s.io/apimachinery/pkg/util/runtime Apache-2.0 k8s.io/apimachinery/pkg/util/sets Apache-2.0 k8s.io/apimachinery/pkg/util/validation Apache-2.0 k8s.io/apimachinery/pkg/util/validation/field Apache-2.0 k8s.io/apimachinery/pkg/util/wait Apache-2.0 k8s.io/apimachinery/pkg/util/yaml Apache-2.0 k8s.io/apimachinery/pkg/version Apache-2.0 k8s.io/apimachinery/pkg/watch Apache-2.0 k8s.io/apimachinery/third_party/forked/golang/netutil Apache-2.0 k8s.io/apimachinery/third_party/forked/golang/reflect Apache-2.0 k8s.io/client-go/discovery Apache-2.0 k8s.io/client-go/kubernetes Apache-2.0 k8s.io/client-go/kubernetes/scheme Apache-2.0 k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/apps/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/apps/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/apps/v1beta2 Apache-2.0 k8s.io/client-go/kubernetes/typed/authentication/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/authentication/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/authorization/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/authorization/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/autoscaling/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/batch/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/batch/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/batch/v2alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/core/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/events/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/networking/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/policy/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/rbac/v1 Apache-2.0 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 Apache-2.0 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/settings/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/storage/v1 Apache-2.0 
k8s.io/client-go/kubernetes/typed/storage/v1alpha1 Apache-2.0 k8s.io/client-go/kubernetes/typed/storage/v1beta1 Apache-2.0 k8s.io/client-go/pkg/apis/clientauthentication Apache-2.0 k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 Apache-2.0 k8s.io/client-go/pkg/version Apache-2.0 k8s.io/client-go/plugin/pkg/client/auth/exec Apache-2.0 k8s.io/client-go/rest Apache-2.0 k8s.io/client-go/rest/watch Apache-2.0 k8s.io/client-go/third_party/forked/golang/template Apache-2.0 k8s.io/client-go/tools/auth Apache-2.0 k8s.io/client-go/tools/clientcmd Apache-2.0 k8s.io/client-go/tools/clientcmd/api Apache-2.0 k8s.io/client-go/tools/clientcmd/api/latest Apache-2.0 k8s.io/client-go/tools/clientcmd/api/v1 Apache-2.0 k8s.io/client-go/tools/metrics Apache-2.0 k8s.io/client-go/tools/reference Apache-2.0 k8s.io/client-go/tools/remotecommand Apache-2.0 k8s.io/client-go/transport Apache-2.0 k8s.io/client-go/transport/spdy Apache-2.0 k8s.io/client-go/util/cert Apache-2.0 k8s.io/client-go/util/exec Apache-2.0 k8s.io/client-go/util/flowcontrol Apache-2.0 k8s.io/client-go/util/homedir Apache-2.0 k8s.io/client-go/util/integer Apache-2.0 k8s.io/client-go/util/jsonpath Apache-2.0 k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1 Apache-2.0", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 58.9981, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": "/mock MIT stretchr/testify/require MIT golang.org/x/crypto/ssh/terminal BSD-3-Clause golang.org/x/net/context BSD-3-Clause golang.org/x/net/context/ctxhttp BSD-3-Clause golang.org/x/net/internal/socks BSD-3-Clause golang.org/x/net/proxy BSD-3-Clause golang.org/x/sys/unix BSD-3-Clause gopkg.in" + }, + "id": "60452840196a679fc3960f7e" + } + ], + "/gin/d0dee9c1-b67b-427d-bb85-303cc5dab2a8": [ { "sections": [ "Mux", @@ -50589,7 +50498,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.69726, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -50630,7 +50539,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -50640,6 +50549,32 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). 
If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 110.14922, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Install the Go agent in GAE flexible environment", @@ -50671,7 +50606,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.33989, + "_score": 65.31719, "_version": null, "_explanation": null, "sort": null, @@ -50702,7 +50637,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.958164, + "_score": 58.9981, "_version": null, "_explanation": null, "sort": null, @@ -50713,32 +50648,6 @@ } ], "/mux/dc906e6a-ff9c-4fe5-935f-e8a7673c14bf": [ - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
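For the Go agent in that list, "insert some code" comes down to a configuration option at agent startup. A minimal sketch assuming the go-agent v3 API; the service name is a placeholder, and the option is set explicitly because its default has varied across agent versions.

```go
package main

import (
	"log"
	"os"

	"github.com/newrelic/go-agent/v3/newrelic"
)

func main() {
	// Enabling the distributed tracer makes this service emit and accept
	// trace context, so its spans join cross-service traces.
	app, err := newrelic.NewApplication(
		newrelic.ConfigAppName("checkout-service"), // hypothetical name
		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
		newrelic.ConfigDistributedTracerEnabled(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = app // wrap handlers or start transactions with app as usual
}
```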
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.38019, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -50770,7 +50679,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -50811,7 +50720,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -50821,6 +50730,32 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 110.14922, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Install the Go agent in GAE flexible environment", @@ -50852,7 +50787,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.33989, + "_score": 65.31719, "_version": null, "_explanation": null, "sort": null, @@ -50883,7 +50818,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.958164, + "_score": 58.9981, "_version": null, "_explanation": null, "sort": null, @@ -50894,32 +50829,6 @@ } ], "/nats/ac4355fd-31bc-4753-a67f-7e24feb9cb55": [ - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.38011, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -50951,7 +50860,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -50992,7 +50901,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -51002,6 +50911,32 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 110.14922, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Install the Go agent in GAE flexible environment", @@ -51033,7 +50968,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.33989, + "_score": 65.31719, "_version": null, "_explanation": null, "sort": null, @@ -51064,7 +50999,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.958164, + "_score": 58.9981, "_version": null, "_explanation": null, "sort": null, @@ -51075,32 +51010,6 @@ } ], "/micro/1b217f59-7152-40f9-b63b-01b5952ff45a": [ - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.38019, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -51132,7 +51041,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -51173,7 +51082,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -51183,6 +51092,32 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 110.14922, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Install the Go agent in GAE flexible environment", @@ -51214,7 +51149,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.33989, + "_score": 65.31719, "_version": null, "_explanation": null, "sort": null, @@ -51245,7 +51180,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.958164, + "_score": 58.9981, "_version": null, "_explanation": null, "sort": null, @@ -51295,7 +51230,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 69.895935, + "_score": 65.391045, "_version": null, "_explanation": null, "sort": null, @@ -51375,7 +51310,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 51.225166, + "_score": 50.366966, "_version": null, "_explanation": null, "sort": null, @@ -51410,7 +51345,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 38.65253, + "_score": 38.507614, "_version": null, "_explanation": null, "sort": null, @@ -51450,7 +51385,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 35.34933, + "_score": 35.45433, "_version": null, "_explanation": null, "sort": null, @@ -51497,7 +51432,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 34.9342, + "_score": 35.04619, "_version": null, "_explanation": null, "sort": null, @@ -51508,32 +51443,6 @@ } ], "/zap/f49bc8d7-8dee-4096-974f-109eb6cbb058": [ - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! 
To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.38019, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -51565,7 +51474,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -51606,7 +51515,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -51616,6 +51525,32 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. 
Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 110.14922, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Install the Go agent in GAE flexible environment", @@ -51647,7 +51582,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.33989, + "_score": 65.31719, "_version": null, "_explanation": null, "sort": null, @@ -51678,7 +51613,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.958164, + "_score": 58.9981, "_version": null, "_explanation": null, "sort": null, @@ -51712,7 +51647,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 373.58154, + "_score": 351.044, "_version": null, "_explanation": null, "sort": null, @@ -51755,7 +51690,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 358.31213, + "_score": 332.26825, "_version": null, "_explanation": null, "sort": null, @@ -51794,7 +51729,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 343.37973, + "_score": 318.34332, "_version": null, "_explanation": null, "sort": null, @@ -51839,7 +51774,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 337.73837, + "_score": 317.3634, "_version": null, "_explanation": null, "sort": null, @@ -51879,7 +51814,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 334.72916, + "_score": 314.4875, "_version": null, "_explanation": null, "sort": null, @@ -51923,7 +51858,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -51964,7 +51899,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -52005,7 +51940,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -52046,7 +51981,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -52087,7 +52022,7 @@ "info": "", "_index": 
"520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -52125,7 +52060,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 75.93649, + "_score": 70.58235, "_version": null, "_explanation": null, "sort": null, @@ -52165,7 +52100,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 61.58174, + "_score": 58.156723, "_version": null, "_explanation": null, "sort": null, @@ -52204,7 +52139,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 52.60234, + "_score": 49.67942, "_version": null, "_explanation": null, "sort": null, @@ -52241,7 +52176,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 52.59039, + "_score": 49.66977, "_version": null, "_explanation": null, "sort": null, @@ -52252,42 +52187,39 @@ }, { "sections": [ - "New Relic One CLI reference", - "Installing the New Relic One CLI", - "Tip", - "New Relic One CLI Commands", - "Get started", - "Configure your CLI preferences", - "Set up your Nerdpacks", - "Manage your Nerdpack subscriptions", - "Install and manage plugins", - "Manage catalog information" + "iOS agent compatibility and requirements", + "Foreground monitoring", + "iOS requirements", + "Testing is not supported", + "Potential method replacement conflicts" ], - "title": "New Relic One CLI reference", - "type": "developer", + "title": "iOS agent compatibility and requirements", + "type": "docs", "tags": [ - "New Relic One app", - "nerdpack commands" + "Get started", + "New Relic Mobile iOS", + "Mobile monitoring" ], - "external_id": "858339a44ead21c83257778ce60b4c352cd30d3b", - "image": "https://developer.newrelic.com/static/2c6d337608b38a3312b4fc740afe6167/7272b/developercenter.png", - "url": "https://developer.newrelic.com/explore-docs/nr1-cli/", - "published_at": "2022-02-15T01:41:15Z", - "updated_at": "2022-02-11T01:45:15Z", + "external_id": "544e062fdc57c4545c2f36b54b38f95b30b3c25e", + "image": "", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-ios/get-started/new-relic-ios-compatibility-requirements/", + "published_at": "2022-02-14T12:02:28Z", + "updated_at": "2021-12-19T19:52:39Z", "document_type": "page", "popularity": 1, - "info": "An overview of the CLI to help you build, deploy, and manage New Relic apps.", - "body": "To build a New Relic One app, you must install the New Relic One CLI. The CLI helps you build, publish, and manage your New Relic app. We provide a variety of tools for building apps, including the New Relic One CLI (command line interface). This page explains how to use CLI commands to: Generate Nerdpack/Nerdlet templates Locally serve Nerdpacks (when developing) Publish and deploy Subscribe to Nerdpacks Add screenshots and metadata to the catalog Installing the New Relic One CLI In New Relic, click Instant Observability, then check the Apps box in the filter by section. Click the Build on New Relic launcher and follow the quick start instructions. The quick start automatically generates an API key for the account you select, and gives you the pre-populated commands to create a profile, generate your first \"Hello World\" app, and serve it locally. Tip Use the NR1 VS Code extension to build your apps. New Relic One CLI Commands This table provides descriptions for the New Relic One commands. 
For more context, including usage and option details, click any individual command or the command category. For details on user permissions, see Permissions. For more on how to serve and publish your application, see our guide on Deploying your New Relic One app. Get started nr1 help Shows all nr1 commands or details about each command. nr1 update Updates to the latest version of the CLI. nr1 create Creates a new component from a template (Nerdpack, Nerdlet, launcher, or catalog). nr1 profiles Manages the profiles you use to run CLI commands. nr1 autocomplete Displays autocomplete installation instructions. nr1 nrql Fetches data using NRQL (New Relic query language). Configure your CLI preferences nr1 config:set Sets a specific configuration value. nr1 config:get Shows a specific configuration. nr1 config:list Lists your configuration choices. nr1 config:delete Removes the value of a specific configuration. Set up your Nerdpacks nr1 nerdpack:build Assembles your Nerdpack into bundles. nr1 nerdpack:clone Clones an open source Nerdpack from our GitHub repository. nr1 nerdpack:serve Serves your Nerdpack for testing and development purposes. nr1 nerdpack:uuid Shows or regenerates the UUID of a Nerdpack. nr1 nerdpack:publish Publishes your Nerdpack to New Relic. nr1 nerdpack:deploy Deploys a Nerdpack version to a specific channel. nr1 nerdpack:undeploy Undeploys a Nerdpack version from a specific channel. nr1 nerdpack:clean Cleans your developtment folders. nr1 nerdpack:validate Validates the contents of your Nerdpack. nr1 nerdpack:info Shows the state of your Nerdpack in the New Relic's registry. Manage your Nerdpack subscriptions nr1 subscription:set Subscribes your account to a Nerdpack and channel. nr1 subscription:list Lists all the Nerdpacks your account is subscribed to. nr1 subscription:unset Unsubscribes your account from a Nerdpack. Install and manage plugins nr1 plugins:install Installs a plugin into the CLI. nr1 plugins:link Links a plugin into the CLI for development. nr1 plugins:update Updates your installed plugins. nr1 plugins:uninstall Removes a plugin from the CLI. Manage catalog information nr1 catalog:info Shows the Nerdpack info stored in the catalog. nr1 catalog:submit Gathers and submits the catalog info on the current folder.", + "body": "Before you install and configure the iOS agent, follow these guidelines for compatibility and other requirements. Foreground monitoring The iOS agent only monitors your app while it is in the foreground. The agent does not monitor background services while the app is closed. For more information, see our Mobile data privacy and security documentation. iOS requirements Make sure your iOS app meets these requirements: Component iOS application requirements Operating system iOS 9 or higher For Bitcode support, use SDK version 5.3.0 or higher. API/SDK NSURLConnection and AFNetworking are supported. NSURLSession supports upload and data tags only. ASIHttpRequest networking APIs are deprecated as of iOS agent version 5.8.2. Network traffic for UIWebView and WKWebView is supported. However, WKWebView Transfer size and Http errors are not supported. Languages Objective-C Swift: Works with both network traces and crash reporting, but no interaction traces by default. Interaction traces must be enabled for Swift. Devices Any iOS compatible device: iPhones, iPads, etc. File sizes The agent adds about 2 to 12 megabytes to your iOS release app, depending on platform build. Architectures ARM 64-bit. 
SHA-2 As a standard security measure for data collection, New Relic requires that your application server supports SHA-2 (256-bit). SHA-1 is not supported. Xcode To take advantage of New Relic's iOS features, make sure you have the latest version of Xcode. arm64e support To be able to properly symbolicate crashes from devices with arm64e architectures, make sure your Xcode settings are enabled for pointer authentication. For more information, see the Apple developer documentation. CocoaPods In order to use the latest XCFramework Agent, use CocoaPods version 1.10.1 or higher. Testing is not supported Our agents are designed and tested to work in a normal app lifecycle. New Relic does not support running any testing environment on applications with the agent. Testing can cause conflicts and unpredictable behavior. Potential method replacement conflicts Our iOS agent utilizes method replacement during run time. This may result in a conflict with other libraries that also implement method replacement, such as ReactiveCocoa, Firebase, Aspects, and AppleGuice.", + "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 36.00315, + "_score": 35.780693, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " nerdpack:serve Serves your Nerdpack for testing and development purposes. nr1 nerdpack:uuid Shows or regenerates the UUID of a Nerdpack. nr1 nerdpack:publish Publishes your Nerdpack to New Relic. nr1 nerdpack:deploy Deploys a Nerdpack version to a specific channel. nr1 nerdpack:undeploy Undeploys a Nerdpack" + "sections": "Testing is not supported", + "body": " symbolicate crashes from devices with arm64e architectures, make sure your Xcode settings are enabled for pointer authentication. For more information, see the Apple developer documentation. CocoaPods In order to use the latest XCFramework Agent, use CocoaPods version 1.10.1 or higher. 
Testing" }, - "id": "6091fa9864441feb412f36d4" + "id": "6044196064441f4f10378f04" } ], "/mssql/58b693d2-6e8d-4df0-b8b4-00a089376d2d": [ @@ -52322,7 +52254,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59566, + "_score": 102.699814, "_version": null, "_explanation": null, "sort": null, @@ -52364,7 +52296,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75413, + "_score": 101.91943, "_version": null, "_explanation": null, "sort": null, @@ -52406,7 +52338,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7467, + "_score": 101.913666, "_version": null, "_explanation": null, "sort": null, @@ -52448,7 +52380,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93249, + "_score": 98.52904, "_version": null, "_explanation": null, "sort": null, @@ -52491,7 +52423,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.185425, + "_score": 95.344635, "_version": null, "_explanation": null, "sort": null, @@ -52531,7 +52463,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 362.94754, + "_score": 343.18988, "_version": null, "_explanation": null, "sort": null, @@ -52588,7 +52520,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 312.2268, + "_score": 296.05, "_version": null, "_explanation": null, "sort": null, @@ -52629,7 +52561,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 292.37012, + "_score": 274.1557, "_version": null, "_explanation": null, "sort": null, @@ -52666,7 +52598,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 263.57642, + "_score": 249.1105, "_version": null, "_explanation": null, "sort": null, @@ -52680,51 +52612,65 @@ }, { "sections": [ - "Notification message templates", - "Message template variables", - "The variables menu", - "Use the Handlebars syntax", - "Helper functions", - "JSON", - "Equality", - "Replace", - "Usage examples", - "Validate data", - "Return JSON", - "Get values from an array", - "Iterate through an array", - "Handle missing attributes" + "Get started with Incident Intelligence", + "Requirements", + "Set up Incident Intelligence", + "1. Configure your environment (one-time)", + "Tip", + "2. Configure sources", + "Alerts", + "Datarobot (formerly Algorithmia)", + "Aporia (MLOps)", + "Superwise (MLOps)", + "Anomalies", + "AWS", + "Grafana", + "PagerDuty", + "EOL NOTICE", + "Prometheus Alertmanager", + "REST API", + "Splunk", + "Important", + "Splunk metadata", + "3. Configure destinations (ServiceNow and others)", + "Configure ServiceNow (example)", + "Send data to ServiceNow", + "Custom notification message", + "Other destinations", + "Send data to PagerDuty", + "Send data via webhook", + "4. Configure pathways", + "What's next?" 
], - "title": "Notification message templates", + "title": "Get started with Incident Intelligence", "type": "docs", "tags": [ - "Notification templates", - "Incident Intelligence", + "Incident intelligence", "Applied intelligence", "Alerts and Applied Intelligence" ], - "external_id": "17c0cb0905ad9d7fad7c31c814704279312f55b5", - "image": "https://docs.newrelic.com/static/7b1203c718220cb4a25d8d52dbbbbe98/c1b63/notification-payload-template.png", - "url": "https://docs.newrelic.com/docs/alerts-applied-intelligence/notifications/message-templates/", - "published_at": "2022-02-14T04:51:44Z", - "updated_at": "2022-02-14T04:51:44Z", + "external_id": "8c4a5a914ca01cb42250908d2fb1a12ccc697e25", + "image": "https://docs.newrelic.com/static/5cb28999dc618c6a5a2b8be1fa72e660/b97f6/image-%25281%2529_0.png", + "url": "https://docs.newrelic.com/docs/alerts-applied-intelligence/applied-intelligence/incident-intelligence/get-started-incident-intelligence/", + "published_at": "2022-02-14T06:50:45Z", + "updated_at": "2022-01-17T04:46:06Z", "document_type": "page", "popularity": 1, - "body": "Notification message templates enable you to customize your notification event data before it's sent to your third-party destination. The templates map your custom values to the values used by your third-party destination. This gives you full control over what data will be sent and where, as well as being able to fully engage with the services you use. Message template variables A message template is what you use to convert New Relic event data to data that's consumable by your third-party service. Variables are specific attributes that are mapped to data fields in your third-party service. Message templates are written in a simple templating language called Handlebars. Variables in the message templates are written as expressions inside double curly braces {{ }}. Use the notification message template to map your New Relic notifications to the fields in your external services. The variables menu The New Relic variable names are listed in the message template variables menu. The variables are grouped into subcategories. In the variables menu, type {{ to select from a list of variables. As you type, variable names appear via autocomplete. The variable type is written on the right-hand side. You can add enriched data to these variables. The variables menu shows the options you have when mapping New Relic notification fields onto the fields in your external service. Use the Handlebars syntax When an event generates a notification, the message template uses the Handlebar variables to map the notification data to the fields used by your third-party service. The Handlebars language provides many features in addition to basic variable replacement, including evaluating nested input objects and functions, such as iterations (loops), conditional statements, and more. In Handlebars, these functions are called helpers. Helper functions Our message templates support the Handlebars built-in helpers. In addition, we've added other helpers that might be useful to you. JSON The {{json}} helper converts text to a JSON element. Use this when you're configuring a Webhook’s payload, which uses a JSON syntax, and any other situation you might want to pass JSON formatted data. For example, with a variable called data. 
{ \"data\": { \"tags\": [\"infra, team-a\"] } } Copy To get the names array as a JSON element, use the {{json}} helper: {{json data.tags}} Copy to get: [\"infra\", \"team-a\"] Copy Equality Use the equality {{#eq}} helper to compare variables. Compares variables a and b, renders 'yes' or 'no': {{#eq a b}} yes {{else}} no {{/eq}} Compares string value \"a\" to variable b, renders 'yes' or 'no': {{#eq \"a\" b}} yes {{else}} no {{/eq}} Renders 'true' or 'false': {{eq a b}} Render 'y' or 'n': {{eq a b yes='y' no='n'}} Copy Replace The replace helper replaces instances of the first parameter in the second parameter with the child block. Use else clause to specify what happens when no instance of the first parameter is found. If it is omitted an empty string will be generated. Example #1: replace the word dog with cat in the sentence The dog likes to eat: {{#replace \"dog\" \"The dog likes to eat\"}}cat{{/replace}} Copy to get: The cat likes to eat Copy Example #2: replace the word cat with mouse in the sentence The dog likes to eat: {{#replace \"cat\" \"The dog likes to eat\"}}mouse{{/replace}} Copy to get an empty string: Copy Example #3: replace the word cat with mouse in the sentence The dog likes to eat, using the else clause: {{#replace \"cat\" \"The dog likes to eat\"}}mouse{{else}}There is no cat to replace{{/replace}} Copy to get: There is no cat to replace Copy Example #4: replace the word dog with cat in the sentence The DOG likes to eat while ignoring case: {{#replace \"/dog/i\" \"The DOG likes to eat\"}}cat{{/replace}} Copy to get: The cat likes to eat Copy Example #5: replace the variable {{needle}} with the variable {{replacement}} in the variable {{haystack}}: {{#replace needle haystack }}{{replacement}}{{/replace}} Copy using this data: { \"needle\": \"/dog/i\", \"haystack\": \"The DOG likes to eat\", \"replacement\": \"cat\" } Copy to get: The cat likes to eat Copy Usage examples The examples are based on a variable called data: \"data\": { \"tags\":[\"infra, team-a\"], \"id\":123456789, \"name\": \"Alice\", } Copy The data value has an equivalent, dot-notated format: \"data.tags\": [\"infra, team-a\"] \"data.id\": 123456789 \"data.name\": \"Alice\" Copy Validate data If id equals 123456789, then the output is valid. If not, the output is not valid. {{eq data.name \"Alice\" yes='valid' no='not valid'}} Copy If name equals Alice, then the output is valid. 
Return JSON Get the tags and object’s properties in a JSON form: {{json data.tags}} Copy This would return the following JSON: [\"infra\", \"team-a\"] Copy Get values from an array Get the first tag from the tags array: {{json data.tags.[0]}} Copy This would return the first value from the array: bash Copy $ \"infra\" Iterate through an array Iterate a variable of type array and aggregate the values into a string: {{#each tags}}{{this}}{{#unless @last}}, {{/unless}}{{/each}} Copy The result contains the tags, separated by commas (the trailing comma is omitted): bash Copy $ infra, team Similarly, iterate the data variable, aggregate the object’s values, and output a JSON element: {{#each (json data)}}{{this}}{{/each}} Copy This would return a JSON such as: { \"tags\": [\"infra, team-a\"], \"name\": \"Alice\", \"id\": \"123456789\" } Copy Iterate the data variable, then aggregate the object’s entries to a string: {{#each data}}{{@key}}: {{this}}{{#unless @last}}, {{/unless}}{{/each}} Copy This would return a string such as: bash Copy $ tags: infra,team-a, name: Alice, id: 123456789 Handle missing attributes In some cases, an attribute may be missing from the variables menu, or it may not exist at all. We can use the #if statement to set a fallback, such as: {{#if data.type}} {{ json data.type }} {{else}}\"N/A\"{{/if}} Copy would return the string \"N/A\".", + "body": "As part of Applied Intelligence, Incident Intelligence helps you correlate your incidents and reduce noise in your environment. It gives you an overview of all your incidents, their sources, and related events. Requirements If you haven't already, sign up for a New Relic account to get started. To use most Applied Intelligence features, you must be a full platform user. For more information, see capabilities by user type. Set up Incident Intelligence To enable Incident Intelligence, follow these steps. Afterwards, issues should start to appear in your issue feed. 1. Configure your environment (one-time). 2. Configure sources. 3. Configure destinations. 4. Configure pathways. 1. Configure your environment (one-time) To set up an environment in Incident Intelligence, you need an administrator to select a New Relic account for it. This account should be the one your team is using. Who sets the environment? Only administrators, and only for accounts where they have admin privileges. Can administrators set more than one environment? They can set one environment per parent account and its child accounts. More than one can be set if an administrator has privileges for more than one parent account. Tip Incident Intelligence is a cross-account product. This means you can send in data from any New Relic account or external source to correlate events. 2. Configure sources After setting up your environment, determine your incident sources. These are your data inputs. You can get data from any of the following sources: Alerts By integrating Incident Intelligence with your alerts violations, you can get context and correlations from what you're monitoring. To get data from alerts: From one.newrelic.com, click Alerts. On the left under Incident Intelligence, click Sources and then click Alerts. Select the policies you want to connect to Applied Intelligence, and click Connect. You can add additional alert policies or remove policies you've already connected in Sources > New Relic Alerts. Tip Adding alerts as a source will not affect your current configuration or notifications.
Datarobot (formerly Algorithmia) By integrating Incident Intelligence with your Datarobot machine-learning models, you can monitor your machine learning model performance. To configure Datarobot for Incident Intelligence, see our integration docs. Aporia (MLOps) By integrating Incident Intelligence with your Aporia machine-learning models, you can monitor your machine learning model performance. To configure our Aporia integration, see our docs. Superwise (MLOps) By integrating Incident Intelligence with your Superwise machine-learning models, you can monitor your machine learning model performance. To configure our Superwise integration, see our docs. Anomalies By integrating Incident Intelligence with your New Relic Proactive Detection anomalies, you can get context and correlations from what you're monitoring. To get data from New Relic Proactive Detection anomalies: From one.newrelic.com, click Alerts & AI. On the left under Incident Intelligence, click Sources and then click Anomalies. Select the configurations you want to connect to Applied Intelligence by toggling to on (green). To add or remove alert policies, from Alerts & AI, click Sources, then Alerts. Tip Adding anomalies as a source won't affect your current Proactive Detection configurations or notifications. AWS You can integrate Incident Intelligence with Amazon CloudWatch to provide incident management for all of your AWS services. To integrate Amazon CloudWatch: Go to one.newrelic.com and click Alerts & AI. On the left under Incident Intelligence, click Sources and then click Amazon Web Services. Copy the URL. Create a new Amazon SNS topic. Set CloudWatch to forward all alarm state changes to that topic: In the Amazon CloudWatch UI, click Events > Event Pattern. Select Service Name > CloudWatch. Select Event Type > CloudWatch Alarm State Change. Select Targets > SNS Topic, and select your new Amazon SNS topic. Create a new subscription: In the Amazon AWS UI, click Create a Subscription. Select your new Amazon SNS topic. Select Protocol > HTTPS. In Endpoint, paste the URL you previously copied from the Applied Intelligence Sources. Grafana You can integrate Incident Intelligence with Grafana's notifications for insight into events across your applications and environment. Grafana's webhook notification is a simple way to send information over HTTP to a custom endpoint. To integrate Grafana as a new webhook: Log into your Grafana portal using Admin permissions, and choose Alerting. On the Grafana Notification Channels page, click New Channel > Webhook. Go to one.newrelic.com and click Alerts & AI. On the left under Incident Intelligence, click Sources, and then click Grafana. Copy the URL, and paste it into your new Grafana webhook. PagerDuty EOL NOTICE As of October 2021, we've discontinued support for several capabilities with PagerDuty, including suggested responders, golden signals, and component enrichment. For more details, including how you can easily make this transition, see our Explorers Hub post. You can integrate Incident Intelligence directly with your PagerDuty services to ingest, process, and enhance all of your PagerDuty incidents. Connecting PagerDuty services to Applied Intelligence will not affect your current services or notifications. To get data from PagerDuty: Make sure your PagerDuty API key has write access. From one.newrelic.com, click Alerts & AI. On the left under Incident Intelligence, click Sources and then click PagerDuty. Enter your PagerDuty API key.
The key should be either a personal or general access key with write access. If it's created by a user, the user should be an admin. Select the PagerDuty services you want to connect to Applied Intelligence, and click Connect. You can add additional services or remove services you've already connected in Sources > PagerDuty. Prometheus Alertmanager By integrating Incident Intelligence with Prometheus Alertmanager, you can receive and correlate your Prometheus alerts with events from other sources. To integrate Prometheus Alertmanager: Set up your Alertmanager configuration file by running: ./alertmanager -config.file=simple.yml Copy Go to one.newrelic.com and click Alerts & AI. On the left under Incident Intelligence, click Sources and then click Prometheus Alertmanager. Copy the Prometheus Alertmanager URL, and paste it into the /url section of your Alertmanager config file. Reload the Prometheus Alertmanager configuration with one of the two methods: Send a SIGHUP to the process. Send an HTTP POST request to the /-/reload endpoint. REST API Incident Intelligence supports a dedicated REST API interface that lets you integrate with additional systems. The interface allows instrumentation of your code or other monitoring solutions to report any kind of metric or event. A metric can be a raw data point such as CPU, memory, disk utilization, or business KPI. An event can be a monitoring alert, deployment event, incident, exception, or any other change in state that you want to describe. You can also send any type of data to Incident Intelligence straight from your own systems or applications. The REST API supports secure token-based authentication and accepts JSON content as input. For more information on authentication and the full API reference, see REST API for New Relic Applied Intelligence. Splunk By integrating Incident Intelligence with your Splunk log monitoring, you can: Use your environment's log data for searches and key term reports. Correlate alerts and search reports with your other metrics and incidents. Important Applied Intelligence supports Splunk Light, Splunk Cloud, and Splunk Enterprise version 6.3 and higher. To get data from Splunk: In your Splunk console, start a search for the relevant events. Save your search as an alert, configure your alert conditions, and then choose the webhook as the delivery method. Go to one.newrelic.com and click Alerts & AI. On the left under Incident Intelligence, click Sources and then click Splunk. Copy the collector URL, and paste it into the webhook endpoint in the Splunk console. Optional: Use Splunk tokens to enrich alert data with Splunk metadata. Splunk metadata To enrich alert data with your Splunk metadata, use Splunk tokens. This helps you leverage your search data, which includes metadata and values from the first row of search results. If you want to... Do this... Access search data Use the format $token_name$. For example, use $app$ for the app context for the search. Access field values To access field values from the first result row that a search returns, use the format $result.fieldname$. For example, use $result.host$ for the host value and $result.sourcetype$ for the source type. Use variables You can leverage any of the Selected fields in the Splunk search and add any unique fields to the Selected fields to make the data available as a variable.
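The REST API source described above accepts JSON over a token-authenticated HTTPS endpoint. As a rough sketch only: the collector URL, the Authorization header shape, and the payload fields below are placeholders, not the documented interface; the linked REST API reference has the real contract.

```bash
# Hypothetical event submission to the Incident Intelligence REST API.
# COLLECTOR_URL and API_TOKEN come from your own configuration; the JSON
# fields are illustrative, matching the "event" examples in the text above.
curl -X POST "$COLLECTOR_URL" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $API_TOKEN" \
  -d '{"source": "my-monitoring-tool", "state": "alert", "description": "Disk utilization above 95% on host db-1"}'
```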
The following fields will automatically provide hints to the correlation engine: app: parsed as APPLICATION_NAME application:parsed as APPLICATION_NAME application_name: parsed as APPLICATION_NAME cluster: parsed as CLUSTER_NAME computer: parsed as HOST_NAME Dc: parsed as DATACENTER_NAME datacenter: parsed as DATACENTER_NAME host: parsed as HOST_NAME host_name: parsed as HOST_NAME hostname: parsed as HOST_NAME transaction: parsed as EVENT_ID Transaction_id: parsed as EVENT_ID user: parsed as USER_NAME 3. Configure destinations (ServiceNow and others) Now that you've set up your sources, you can configure your destinations. These are the data outputs where you view your incidents. Configure ServiceNow (example) Using ServiceNow as a destination enables you to push valuable violation data into new ServiceNow incident tickets. Send data to ServiceNow To configure Incident Intelligence to send data to ServiceNow: Go to one.newrelic.com, click Alerts & AI, in the left nav under Incident Intelligence click Destinations, then click ServiceNow. Required: Enter a channel name. This is used internally in Applied Intelligence to identify the destination (for example, in Pathways). Required: Enter your ServiceNow credentials: Team domain (This must be unique. No two destinations can have the same domain). Username Password Follow the two-way integration on screen instructions: Open and download this XML file. In the ServiceNow sidebar menu, go to System Definition > Business Rule. Click the menu icon in one of the column headers, select Import XML, and upload the XML file you downloaded. The two way integration will allow the ServiceNow incident to be updated with changes to the Applied Intelligence issue. Closing a ServiceNow incident will close its corresponding New Relic issue. When a New Relic issue is resolved, the corresponding ServiceNow incident will be closed. Custom notification message Applied Intelligence uses a templating framework called Jinja2 in the customization section interface. The Value field must be in valid Jinja syntax. By default, the interface populates a set of default fields in ServiceNow. When you add a custom field, enter the ServiceNow field name you want to use. When you want to skip a selected field in an issue update, add the | skip_on_update string at the end of the value you've selected. Tip By default, ServiceNow adds u_ to the beginning of its custom values. When mapping to ServiceNow attributes, use the Column name value. Please note that the name needs to be lowercase separated by underscores. Go here to see the custom notification message attribute descriptions. Go here to see Jinja2 Useful Syntax. Other destinations You can set other destinations: Send data to PagerDuty EOL NOTICE As of October 2021, we've discontinued support for several capabilities with PagerDuty, including suggested responders, golden signals, and component enrichment. For more details, including how you can easily make this transition, see our Explorers Hub post. Recommended: Create a new PagerDuty service to use as a destination. Because PagerDuty services can also be used as sources, this can help you distinguish your data input from your output. To create a PagerDuty destination: Go to one.newrelic.com, click Alerts & AI, in the left nav under Incident Intelligence click Destinations, then click PagerDuty. Enter your PagerDuty API key. The key should be either a personal or general access key with write access. If it's created by a user, the user should be an admin. 
If you've configured a PagerDuty source with an API key, you can use the same key. Select the PagerDuty services you want to connect to Applied Intelligence, and click Connect. When you're ready, you can add policies for one or more PagerDuty destinations. You can also transfer the policies over from your existing services or leave them as sources if needed. From the Destinations > PagerDuty page, you can also: Review the permissions for your services. Click Authorize when you're done. Add or delete existing services from the PagerDuty destination. Edit permissions for any service. To configure your PagerDuty destinations, use the following settings: Configuration setting Description Trigger new incidents Required. Trigger correlated parent incidents so you can identify issues faster. Edit incident titles Required. Alter your incident titles to help you orient and understand issues. Add new integrations Required. Add integrations to enable incident creation for selected services. Add webhook extensions Add webhook extensions to sync user actions in PagerDuty to New Relic. This lets you update the correlated issue state. Auto-resolve correlated incidents When enabled, this will resolve and automatically close correlated parent/child incidents. Select a user to take actions in PagerDuty You need to select a user before you can enable deep integration with PagerDuty. Once you do, the user can: Add notes to incidents (required): Incident notes are used to enrich incidents with context. Acknowledge triggered incidents: When enabled, Applied Intelligence will acknowledge and correlate newly triggered incidents in PagerDuty before you're notified. Use the original escalation policy: When enabled, the escalation policy of the source service will be applied to each incident. Send data via webhook Incident Intelligence will send the event body in JSON format by HTTPS POST. The system expects the endpoint to return a successful HTTP code (2xx). To configure Incident Intelligence to send data via webhook: Go to one.newrelic.com, click Alerts & AI, in the left nav under Incident Intelligence click Destinations, then click Webhook. Required: Configure the unique webhook key, used in Applied Intelligence to refer to this webhook configuration and its specific settings. Required: Configure the destination endpoint where the webhook payload will be sent. Optional steps: Configure custom headers, which are key:value pairs of headers to be sent with the request. Example: \"Authentication\" \"Bearer\" Configure a custom payload template that can be used to map New Relic fields to match the destination tool's expected name and format. Configure priority mapping (critical, high, medium, or low), used to map New Relic's priorities to the priorities expected at the destination. Tip When an error occurs, a retry mechanism triggers a few times with exponential backoff over a couple of minutes. If the retry limit is reached, the webhook is auto-disabled. For examples of destination templates, webhook formats, and JSON schema, see the Incident Intelligence destination examples. 4. Configure pathways To control when and where you want to receive notifications from your incidents, you can configure pathways. To add a pathway: Go to one.newrelic.com, click Alerts & AI, in the left nav under Incident Intelligence click Pathways, then click Add a pathway. In the query builder box, select an attribute, such as application/name.
This can be from the list of all attributes available in PagerDuty incidents and New Relic alerts violations, or you can add your own attributes. Select a logical operator. For example, contains. Enter a specific value to complete the logical expression. To include all issues created by your sources, select Send everything. (Use this if you only use one PagerDuty service to manage all incidents.) To build more complex logic, use the AND/OR operators. Select one or more of your destinations. To edit or remove existing pathways, mouse over the pathway's name on the Pathways page. What's next? Now that you've set up some sources and destinations for your incidents, read about how to use Incident Intelligence.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 169.98407, + "_score": 165.62338, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Notification message templates", - "sections": "Notification message templates", - "tags": "Incident Intelligence", - "body": ". Message templates are written in a simple templating language called Handlebars. Variables in the message templates are written as expressions inside double curly braces {{ }}. Use the notification message template to map your New Relic notifications to the fields in your external services" + "title": "Get started with Incident Intelligence", + "sections": "Get started with Incident Intelligence", + "tags": "Incident intelligence", + "body": " (one-time). 2. Configure sources. 3. Configure destinations. 4. Configure pathways. 1. Configure your environment (one-time) To set up an environment in Incident Intelligence, you need an administrator to select a New Relic account for it. This account should be the one your team is using. 
Who sets" }, - "id": "618f3a6c28ccbc60e70317f1" + "id": "603ea62e64441f119f4e883f" } ], "/mariadb/0919c174-0ce5-4b34-a5c0-255986ff9706": [ @@ -52759,7 +52705,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59566, + "_score": 102.69989, "_version": null, "_explanation": null, "sort": null, @@ -52801,7 +52747,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75413, + "_score": 101.91952, "_version": null, "_explanation": null, "sort": null, @@ -52843,7 +52789,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7467, + "_score": 101.91374, "_version": null, "_explanation": null, "sort": null, @@ -52885,7 +52831,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93249, + "_score": 98.52911, "_version": null, "_explanation": null, "sort": null, @@ -52928,7 +52874,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.185425, + "_score": 95.344696, "_version": null, "_explanation": null, "sort": null, @@ -52961,7 +52907,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 98.59509, + "_score": 97.548, "_version": null, "_explanation": null, "sort": null, @@ -52994,7 +52940,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 159.44751, + "_score": 158.1604, "_version": null, "_explanation": null, "sort": null, @@ -53040,7 +52986,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 121.0877, + "_score": 118.64972, "_version": null, "_explanation": null, "sort": null, @@ -53080,7 +53026,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 68.61354, + "_score": 68.64998, "_version": null, "_explanation": null, "sort": null, @@ -53127,7 +53073,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 62.07149, + "_score": 62.019394, "_version": null, "_explanation": null, "sort": null, @@ -53165,7 +53111,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.42835, + "_score": 61.670063, "_version": null, "_explanation": null, "sort": null, @@ -53212,7 +53158,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.87024, + "_score": 61.17478, "_version": null, "_explanation": null, "sort": null, @@ -53224,47 +53170,6 @@ }, "id": "617d6c3064441facbdfbcea9" }, - { - "sections": [ - "Google Cloud Storage monitoring integration", - "Features", - "Activate integration", - "Polling frequency", - "Find and use data", - "Metric data", - "Inventory data", - "EOL NOTICE" - ], - "title": "Google Cloud Storage monitoring integration", - "type": "docs", - "tags": [ - "GCP integrations list", - "Google Cloud Platform integrations", - "Integrations" - ], - "external_id": "c61e6b13585a04041372583cafdc41fcb04f88a2", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/google-cloud-platform-integrations/gcp-integrations-list/google-cloud-storage-monitoring-integration/", - "published_at": "2022-02-15T17:19:02Z", - "updated_at": "2022-02-15T17:19:02Z", - "document_type": "page", - "popularity": 1, - "body": "New Relic offers an integration for reporting your Google Cloud 
Storage data to New Relic. Learn how to connect this integration to infrastructure monitoring and about the metric data that New Relic reports for this integration. Features Google Cloud Storage is a Google Cloud Platform service that you can use to serve website content, to store data for archival and disaster recovery, and to distribute data objects via direct download. With the Google Cloud Storage integration, you can access these features: View charts and information about the data you are storing and retrieving from Google Cloud Storage. Create custom queries and charts in from automatically captured data. Set alerts on your Google Cloud Storage data directly from the Integrations page. Activate integration To enable the integration follow standard procedures to connect your GCP service to New Relic. Polling frequency New Relic queries your Google Cloud Storage services based on a polling interval of 5 minutes. Find and use data After connecting the integration to New Relic and waiting a few minutes, data will appear in the New Relic UI. To find and use integration data, including your dashboards and your alert settings, go to one.newrelic.com > Infrastructure > GCP > Google Cloud Storage. To create custom dashboards for the integration, create queries for the GcpStorageBucketSample event type with the provider value GcpStorageBucket. Metric data The integration reports metric data for all values of method and response_code: response_code: The response code of the requests. method: The name of the API method called. The metric data that New Relic receives from your Google Cloud Storage integration includes: Metric Description api.Requests Delta count of API calls. network.ReceivedBytes Delta count of bytes received over the network. network.SentBytes Delta count of bytes sent over the network. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data for Google Cloud Storage bucket objects includes the following properties: Inventory data Description acl Access control list for the bucket that lets you specify who has access to your data and to what extent. cors The Cross-Origin Resource Sharing (CORS) configuration for the bucket. createTime Time when the bucket was created. defaultAcl Default access control list configuration for the bucket's blobs. etag HTTP 1.1 entity tag for the bucket. indexPage The bucket's website index page. This behaves as the bucket's directory index where missing blobs are treated as potential directories. labels Labels for the bucket, in key/value pairs. This is only available if the GCP project is linked to New Relic through a service account and extended inventory collection is enabled. metageneration The generation of the metadata for the bucket. name The name of the bucket. notFoundPage The custom object that will be returned when a requested resource is not found. owner The owner of the bucket. A bucket is always owned by the project team owners group. project The name that you assigned to the project. A project consists of a set of users, a set of APIs, and settings for those APIs. requesterPays If set to true, the user accessing the bucket or an object it contains assumes the access transit costs. storageClass The default storage class for a bucket, if you don't specify one for a new object. 
The storage class defines how Google Cloud Storage stores objects in the bucket and determines the SLA and storage cost. For more information, see storage classes. zone The zone where the bucket is deployed.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.42726, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Google Cloud Storage monitoring integration", - "sections": "Google Cloud Storage monitoring integration", - "tags": "GCP integrations list", - "body": "New Relic offers an integration for reporting your Google Cloud Storage data to New Relic. Learn how to connect this integration to infrastructure monitoring and about the metric data that New Relic reports for this integration. Features Google Cloud Storage is a Google Cloud Platform service" - }, - "id": "617dc5b664441fe2d8fbe3b9" - }, { "sections": [ "Amazon Athena monitoring integration", @@ -53293,7 +53198,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.38165, + "_score": 60.92514, "_version": null, "_explanation": null, "sort": null, @@ -53333,7 +53238,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.3671, + "_score": 60.913704, "_version": null, "_explanation": null, "sort": null, @@ -53344,6 +53249,47 @@ "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting your AWS" }, "id": "617da500196a676aaef7c7a0" + }, + { + "sections": [ + "Google Cloud Storage monitoring integration", + "Features", + "Activate integration", + "Polling frequency", + "Find and use data", + "Metric data", + "Inventory data", + "EOL NOTICE" + ], + "title": "Google Cloud Storage monitoring integration", + "type": "docs", + "tags": [ + "GCP integrations list", + "Google Cloud Platform integrations", + "Integrations" + ], + "external_id": "c61e6b13585a04041372583cafdc41fcb04f88a2", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/google-cloud-platform-integrations/gcp-integrations-list/google-cloud-storage-monitoring-integration/", + "published_at": "2022-02-15T17:19:02Z", + "updated_at": "2022-02-15T17:19:02Z", + "document_type": "page", + "popularity": 1, + "body": "New Relic offers an integration for reporting your Google Cloud Storage data to New Relic. Learn how to connect this integration to infrastructure monitoring and about the metric data that New Relic reports for this integration. Features Google Cloud Storage is a Google Cloud Platform service that you can use to serve website content, to store data for archival and disaster recovery, and to distribute data objects via direct download. With the Google Cloud Storage integration, you can access these features: View charts and information about the data you are storing and retrieving from Google Cloud Storage. Create custom queries and charts in from automatically captured data. Set alerts on your Google Cloud Storage data directly from the Integrations page. Activate integration To enable the integration follow standard procedures to connect your GCP service to New Relic. Polling frequency New Relic queries your Google Cloud Storage services based on a polling interval of 5 minutes. 
Find and use data After connecting the integration to New Relic and waiting a few minutes, data will appear in the New Relic UI. To find and use integration data, including your dashboards and your alert settings, go to one.newrelic.com > Infrastructure > GCP > Google Cloud Storage. To create custom dashboards for the integration, create queries for the GcpStorageBucketSample event type with the provider value GcpStorageBucket. Metric data The integration reports metric data for all values of method and response_code: response_code: The response code of the requests. method: The name of the API method called. The metric data that New Relic receives from your Google Cloud Storage integration includes: Metric Description api.Requests Delta count of API calls. network.ReceivedBytes Delta count of bytes received over the network. network.SentBytes Delta count of bytes sent over the network. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data for Google Cloud Storage bucket objects includes the following properties: Inventory data Description acl Access control list for the bucket that lets you specify who has access to your data and to what extent. cors The Cross-Origin Resource Sharing (CORS) configuration for the bucket. createTime Time when the bucket was created. defaultAcl Default access control list configuration for the bucket's blobs. etag HTTP 1.1 entity tag for the bucket. indexPage The bucket's website index page. This behaves as the bucket's directory index where missing blobs are treated as potential directories. labels Labels for the bucket, in key/value pairs. This is only available if the GCP project is linked to New Relic through a service account and extended inventory collection is enabled. metageneration The generation of the metadata for the bucket. name The name of the bucket. notFoundPage The custom object that will be returned when a requested resource is not found. owner The owner of the bucket. A bucket is always owned by the project team owners group. project The name that you assigned to the project. A project consists of a set of users, a set of APIs, and settings for those APIs. requesterPays If set to true, the user accessing the bucket or an object it contains assumes the access transit costs. storageClass The default storage class for a bucket, if you don't specify one for a new object. The storage class defines how Google Cloud Storage stores objects in the bucket and determines the SLA and storage cost. For more information, see storage classes. zone The zone where the bucket is deployed.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 60.746803, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Google Cloud Storage monitoring integration", + "sections": "Google Cloud Storage monitoring integration", + "tags": "GCP integrations list", + "body": "New Relic offers an integration for reporting your Google Cloud Storage data to New Relic. Learn how to connect this integration to infrastructure monitoring and about the metric data that New Relic reports for this integration. 
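Tying this back to the nr1 CLI covered earlier: the GcpStorageBucketSample queries suggested above can also be run from a terminal with nr1 nrql. A sketch, assuming --account and --query flag names (an assumption; verify the exact flags with the command's help output):

```bash
# Count recent Google Cloud Storage samples; the account ID is a placeholder.
nr1 nrql --account=12345 \
  --query="SELECT count(*) FROM GcpStorageBucketSample WHERE provider = 'GcpStorageBucket' SINCE 1 hour ago"
```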
Features Google Cloud Storage is a Google Cloud Platform service" + }, + "id": "617dc5b664441fe2d8fbe3b9" } ], "/golden-signals-for-web-servers/aae62e98-51c4-4e73-82b4-29d9ae95433a": [ @@ -53371,7 +53317,7 @@ "external_id": "a8e6eb8132628da407bf24eeeca752931f4a09df", "image": "", "url": "https://developer.newrelic.com/automate-workflows/get-started-terraform/", - "published_at": "2022-02-15T01:39:02Z", + "published_at": "2022-02-16T01:41:52Z", "updated_at": "2021-03-09T01:49:21Z", "document_type": "page", "popularity": 1, @@ -53379,7 +53325,7 @@ "body": "Terraform is a popular infrastructure-as-code software tool built by HashiCorp. You use it to provision all kinds of infrastructure and services, including New Relic dashboards and alerts. In this guide, you learn how to set up New Relic alerts with Terraform. More specifically, you provision an alert policy, four alert conditions, and a notification channel. The four alert conditions are based on the four golden signals of monitoring introduced in Google’s Site Reliability Engineering book: Latency: The amount of time it takes your application to service a request. Traffic: The amount of requests your system receives. Errors: The rate of requests that fail. Saturation: The stress on resources to meet the demands of your application. Before you begin To use this guide, you should have some basic knowledge of both New Relic and Terraform. If you haven't deployed a New Relic open source agent yet, install New Relic for your application. Also, install the Terraform CLI. Step 1 of 5 Bootstrap Terraform and the New Relic provider Start by initializing a working directory and creating a Terraform configuration file: bash Copy $ mkdir terraform-project && cd terraform-project $ touch main.tf Next, instruct Terraform to install and use the New Relic provider, by setting the terraform and required_providers blocks in main.tf: terraform { # Require Terraform version 0.13.x (recommended) required_version = \"~> 0.13.0\" # Require the latest 2.x version of the New Relic provider required_providers { newrelic = { source = \"newrelic/newrelic\" version = \"~> 2.21\" } } } Copy In this code block, you're setting the required version of Terraform to 0.13.x and setting the New Relic provider to the latest 2.x version. Using the right version constraints for your setup will provide better stability with your Terraform runs. Now that you've set your Terraform and New Relic provider versions, you need to configure the New Relic provider. Step 2 of 5 Configure the New Relic provider With terraform all set, configure the New Relic provider with the following items: Your New Relic Account ID. Your New Relic user key. Most user keys begin with the prefix NRAK-. Your New Relic region. Your region is US if your account settings page is located at one.newrelic.com, and EU if your account is located at one.eu.newrelic.com. In main.tf, set those values on the provider: provider \"newrelic\" { account_id = 12345 # Your New Relic account ID api_key = \"NRAK-***\" # Your New Relic user key region = \"US\" # US or EU (defaults to US) } Copy By setting these values on the New Relic provider, you're configuring that provider to make changes on behalf of your account through New Relic APIs. Tip You can also configure the New Relic provider using environment variables. This is a useful way to set default values for your provider configuration. For more information about configuring the New Relic provider, please feel free to check out our official provider documentation. 
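On the environment-variable tip above: a minimal sketch of that approach, assuming the NEW_RELIC_* variable names conventionally read by the New Relic Terraform provider (confirm against the provider documentation; all values are placeholders).

```bash
# Export provider settings instead of hard-coding them in main.tf.
export NEW_RELIC_ACCOUNT_ID=12345       # your New Relic account ID
export NEW_RELIC_API_KEY="NRAK-***"     # your New Relic user key
export NEW_RELIC_REGION="US"            # US or EU
terraform plan                          # the provider picks these up at runtime
```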
With your New Relic provider configured, initialize Terraform: bash Copy $ terraform init When Terraform finishes installing and registering the New Relic provider, you'll receive a success message and some actionable next steps, such as running terraform plan. Before you can run terraform plan, however, you need to create your resources. Step 3 of 5 Create a New Relic alert policy with the golden signal alerts With the New Relic provider configured and initialized, you can define an alerting strategy for your application. Since you're targeting a specific application, use a newrelic_entity to fetch the application information from New Relic and allow us to reference that data elsewhere in the configuration: data \"newrelic_entity\" \"example_app\" { name = \"Your App Name\" # Must be an exact match to your application name in New Relic domain = \"APM\" # or BROWSER, INFRA, MOBILE, SYNTH, depending on your entity's domain type = \"APPLICATION\" } Copy Next, create a newrelic_alert_policy. Give the policy a dynamic name based on your application's name. This helps specify the scope of the policy: resource \"newrelic_alert_policy\" \"golden_signal_policy\" { name = \"Golden Signals - ${data.newrelic_entity.example_app.name}\" } Copy At this point, you should be able to test your configuration with a dry run: bash Copy $ terraform plan You should see output that displays Terraform's execution plan. The plan contains the actions Terraform performs when you run terraform apply: bash Copy # Example output ------------------------------------------------------------------------ An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: + create Terraform will perform the following actions: # newrelic_alert_policy.golden_signal_policy will be created + resource \"newrelic_alert_policy\" \"golden_signal_policy\" { + account_id = (known after apply) + id = (known after apply) + incident_preference = \"PER_POLICY\" + name = \"Golden Signals - Your App Name\" } Plan: 1 to add, 0 to change, 0 to destroy. ------------------------------------------------------------------------ In this case, the plan shows you that Terraform will create a new alert policy when you run terraform apply. After verifying the details, execute the plan to provision the alert policy resource in your New Relic account: bash Copy $ terraform apply Every time you apply changes, Terraform asks you to confirm the actions you've told it to run. Type \"yes\". While it's running, Terraform sends logs to your console: bash Copy # Example output of `terraform apply` newrelic_alert_policy.golden_signal_policy: Creating... newrelic_alert_policy.golden_signal_policy: Creation complete after 1s [id=111222333] Apply complete! Resources: 1 added, 0 changed, 0 destroyed. Log in to New Relic and navigate to Alert Policies to confirm that Terraform created your new policy. As you move through the next steps of creating alert conditions, you can run terraform apply after configuring each resource. Refresh your alert policy webpage to see the new resources. Step 4 of 5 Provision alert conditions based on the four golden signals Next, add alert conditions for your application based on the four golden signals: latency, traffic, errors, and saturation. Apply these alert conditions to the alert policy you created in the previous step. Latency Most folks want to avoid slow response times.
You can create a newrelic_alert_condition that triggers if the overall response time of your application rises above five seconds for five minutes: # Response time resource \"newrelic_alert_condition\" \"response_time_web\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"High Response Time (Web) - ${data.newrelic_entity.example_app.name}\" type = \"apm_app_metric\" entities = [data.newrelic_entity.example_app.application_id] metric = \"response_time_web\" runbook_url = \"https://www.example.com\" condition_scope = \"application\" term { duration = 5 operator = \"above\" priority = \"critical\" threshold = \"5\" time_function = \"all\" } } Copy Note that you're linking this alert condition to the previously configured alert policy with policy_id. Traffic Traffic represents how much demand is placed on your system at any given moment. Throughput is a metric that measures how much traffic goes to your application. Create a newrelic_alert_condition that triggers if the overall response rate of your application falls below five requests per minute for five minutes: # Low throughput resource \"newrelic_alert_condition\" \"throughput_web\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"Low Throughput (Web)\" type = \"apm_app_metric\" entities = [data.newrelic_entity.example_app.application_id] metric = \"throughput_web\" condition_scope = \"application\" # Define a critical alert threshold that will # trigger after 5 minutes below 5 requests per minute. term { priority = \"critical\" duration = 5 operator = \"below\" threshold = \"5\" time_function = \"all\" } } Copy This type of alert is useful when you expect a constant baseline of traffic throughout the day — a drop off in traffic can indicate a problem. Errors If your application's error rate spikes, you need to know about it. Create a newrelic_alert_condition that triggers if your application's error rate rises above 5% for five minutes: # Error percentage resource \"newrelic_alert_condition\" \"error_percentage\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"High Error Percentage\" type = \"apm_app_metric\" entities = [data.newrelic_entity.example_app.application_id] metric = \"error_percentage\" runbook_url = \"https://www.example.com\" condition_scope = \"application\" # Define a critical alert threshold that will trigger after 5 minutes above a 5% error rate. term { duration = 5 operator = \"above\" threshold = \"5\" time_function = \"all\" } } Copy Saturation Saturation represents how \"full\" your service is and can take many forms, such as CPU time, memory allocation, or queue depth. In this example, assume you already have a New Relic Infrastructure agent installed on the hosts serving your application, and you want to configure an alert for when CPU utilization spikes above a certain threshold: # High CPU usage resource \"newrelic_infra_alert_condition\" \"high_cpu\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"High CPU usage\" type = \"infra_metric\" event = \"SystemSample\" select = \"cpuPercent\" comparison = \"above\" runbook_url = \"https://www.example.com\" where = \"(`applicationId` = '${data.newrelic_entity.example_app.application_id}')\" # Define a critical alert threshold that will trigger after 5 minutes above 90% CPU utilization. 
critical { duration = 5 value = 90 time_function = \"all\" } } Copy For the Infrastructure alert, you created a newrelic_infra_alert_condition that triggers if the aggregate CPU usage on these hosts rises above 90% for five minutes. Step 5 of 5 Get notified when an alert triggers Now that you've configured some important alert conditions, add a notification channel to your alert policy to ensure the proper folks get notified when an alert triggers. To do so, use a newrelic_alert_channel. To begin, create an email notification channel to send alert notifications to your email. Use this when you want to notify a specific person or team when alerts are triggered: resource \"newrelic_alert_channel\" \"team_email\" { name = \"example\" type = \"email\" config { recipients = \"yourawesometeam@example.com\" include_json_attachment = \"1\" } } Copy If you want to specify multiple recipients, use a comma-delimited list of emails. Last, but not least, in order to apply the notification channel to your alert policy, create a newrelic_alert_policy_channel: resource \"newrelic_alert_policy_channel\" \"golden_signals\" { policy_id = newrelic_alert_policy.golden_signal_policy.id channel_ids = [newrelic_alert_channel.team_email.id] } Copy A newrelic_alert_policy_channel links the notification channel you just created to your alert policy. To finalize your golden signal alerts configuration, run terraform apply one last time to make sure all of your configured resources are up to date. Extra Credit newrelic_alert_channel supports several types of notification channels, including email, Slack, and PagerDuty. So, if you want to explore this more, try creating an alert channel for a second channel type, such as Slack: # Slack notification channel resource \"newrelic_alert_channel\" \"slack_notification\" { name = \"slack-example\" type = \"slack\" config { # Use the URL provided in your New Relic Slack integration url = \"https://hooks.slack.com/services/XXXXXXX/XXXXXXX/XXXXXXXXXX\" channel = \"your-slack-channel-for-alerts\" } } Copy Before you apply this change, you need to add the New Relic Slack App to your Slack account and select a Slack channel to send the notification. With this new alert channel, triggered alerts send notifications to the Slack channel of your choice. Conclusion As your team evaluates the alerting system you’ve put in place, you’ll find that you may need to tweak configuration values, such as the alert threshold and duration. If you manage your Terraform project in a remote repository, you can submit a pull request so your team can review these changes alongside the rest of your code contributions. Tip You may also want to consider automating this process in your CI/CD pipeline. Use Terraform's recommended practices guide to learn more about their recommended workflow and how to evolve your provisioning practices. Congratulations! You're officially practicing observability-as-code.
Review the New Relic Terraform provider documentation to learn how you can take your configuration to the next level.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 225.34741, + "_score": 219.73543, "_version": null, "_explanation": null, "sort": null, @@ -53414,7 +53360,7 @@ "external_id": "76368b6d7b42905b1effbe6f6d7a328160d5a967", "image": "", "url": "https://developer.newrelic.com/terraform/get-started-terraform/", - "published_at": "2022-02-15T01:40:34Z", + "published_at": "2022-02-16T01:42:41Z", "updated_at": "2021-02-06T01:55:16Z", "document_type": "page", "popularity": 1, @@ -53422,7 +53368,7 @@ "body": "Terraform is a popular infrastructure-as-code software tool built by HashiCorp. You use it to provision all kinds of infrastructure and services, including New Relic dashboards and alerts. In this guide, you learn how to set up New Relic alerts with Terraform. More specifically, you provision an alert policy, four alert conditions, and a notification channel. The four alert conditions are based on the four golden signals of monitoring introduced in Google’s Site Reliability Engineering book: Latency: The amount of time it takes your application to service a request. Traffic: The amount of requests your system receives. Errors: The rate of requests that fail. Saturation: The stress on resources to meet the demands of your application. Before you begin To use this guide, you should have some basic knowledge of both New Relic and Terraform. If you haven't deployed a New Relic open source agent yet, install New Relic for your application. Also, install the Terraform CLI. Step 1 of 5 Bootstrap Terraform and the New Relic provider Start by initializing a working directory and creating a Terraform configuration file: bash Copy $ mkdir terraform-project && cd terraform-project $ touch main.tf Next, instruct Terraform to install and use the New Relic provider, by setting the terraform and required_providers blocks in main.tf: terraform { # Require Terraform version 0.13.x (recommended) required_version = \"~> 0.13.0\" # Require the latest 2.x version of the New Relic provider required_providers { newrelic = { source = \"newrelic/newrelic\" version = \"~> 2.12\" } } } Copy In this code block, you're setting the required version of Terraform to 0.13.x and setting the New Relic provider to the latest 2.x version. Using the right version constraints for your setup will provide better stability with your Terraform runs. Now that you've set your Terraform and New Relic provider versions, you need to configure the New Relic provider. Step 2 of 5 Configure the New Relic provider With terraform all set, configure the New Relic provider with the following items: Your New Relic Account ID. Your New Relic user key. Most user keys begin with the prefix NRAK-. Your New Relic region. Your region is US if your account settings page is located at one.newrelic.com, and EU if your account is located at one.eu.newrelic.com. In main.tf, set those values on the provider: provider \"newrelic\" { account_id = 12345 # Your New Relic account ID api_key = \"NRAK-***\" # Your New Relic user key region = \"US\" # US or EU (defaults to US) } Copy By setting these values on the New Relic provider, you're configuring that provider to make changes on behalf of your account through New Relic APIs. Tip You can also configure the New Relic provider using environment variables. This is a useful way to set default values for your provider configuration. 
For more information about configuring the New Relic provider, see our official provider documentation. With your New Relic provider configured, initialize Terraform: bash Copy $ terraform init When Terraform finishes installing and registering the New Relic provider, you'll receive a success message and some actionable next steps, such as running terraform plan. Before you can run terraform plan, however, you need to create your resources. Step 3 of 5 Create a New Relic alert policy with the golden signal alerts With the New Relic provider configured and initialized, you can define an alerting strategy for your application. Since you're targeting a specific application, use a newrelic_entity to fetch the application information from New Relic and allow you to reference that data elsewhere in the configuration: data \"newrelic_entity\" \"example_app\" { name = \"Your App Name\" # Must be an exact match to your application name in New Relic domain = \"APM\" # or BROWSER, INFRA, MOBILE, SYNTH, depending on your entity's domain type = \"APPLICATION\" } Copy Next, create a newrelic_alert_policy. Give the policy a dynamic name based on your application's name. This helps specify the scope of the policy: resource \"newrelic_alert_policy\" \"golden_signal_policy\" { name = \"Golden Signals - ${data.newrelic_entity.example_app.name}\" } Copy At this point, you should be able to test your configuration with a dry run: bash Copy $ terraform plan You should see output that displays Terraform's execution plan. The plan contains the actions Terraform performs when you run terraform apply: bash Copy # Example output ------------------------------------------------------------------------ An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: + create Terraform will perform the following actions: # newrelic_alert_policy.golden_signal_policy will be created + resource \"newrelic_alert_policy\" \"golden_signal_policy\" { + account_id = (known after apply) + id = (known after apply) + incident_preference = \"PER_POLICY\" + name = \"Golden Signals - Your App Name\" } Plan: 1 to add, 0 to change, 0 to destroy. ------------------------------------------------------------------------ In this case, the plan shows you that Terraform will create a new alert policy when you run terraform apply. After verifying the details, execute the plan to provision the alert policy resource in your New Relic account: bash Copy $ terraform apply Every time you apply changes, Terraform asks you to confirm the actions you've told it to run. Type \"yes\". While it's running, Terraform sends logs to your console: bash Copy # Example output of `terraform apply` newrelic_alert_policy.golden_signal_policy: Creating... newrelic_alert_policy.golden_signal_policy: Creation complete after 1s [id=111222333] Apply complete! Resources: 1 added, 0 changed, 0 destroyed. Log in to New Relic and navigate to Alert Policies to confirm that Terraform created your new policy. As you move through the next steps of creating alert conditions, you can run terraform apply after configuring each resource. Refresh your alert policy webpage to see the new resources. Step 4 of 5 Provision alert conditions based on the four golden signals Next, add alert conditions for your application based on the four golden signals: latency, traffic, errors, and saturation. Apply these alert conditions to the alert policy you created in the previous step. 
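Each of the four conditions below references this policy through newrelic_alert_policy.golden_signal_policy.id. If you'd like to confirm that ID after each apply without opening the UI, an output block can surface it in the apply summary. This is a small optional sketch, not part of the original guide; the output name is an assumption:

output \"golden_signal_policy_id\" {
  description = \"ID of the golden signals alert policy\"
  value       = newrelic_alert_policy.golden_signal_policy.id
}

Terraform prints declared outputs at the end of every terraform apply run.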
Latency Most folks want to avoid slow response times. You can create a newrelic_alert_condition that triggers if the overall response time of your application rises above five seconds for five minutes: # Response time resource \"newrelic_alert_condition\" \"response_time_web\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"High Response Time (Web) - ${data.newrelic_entity.example_app.name}\" type = \"apm_app_metric\" entities = [data.newrelic_entity.example_app.application_id] metric = \"response_time_web\" runbook_url = \"https://www.example.com\" condition_scope = \"application\" term { duration = 5 operator = \"above\" priority = \"critical\" threshold = \"5\" time_function = \"all\" } } Copy Note that you're linking this alert condition to the previously configured alert policy with policy_id. Traffic Traffic represents how much demand is placed on your system at any given moment. Throughput is a metric that measures how much traffic goes to your application. Create a newrelic_alert_condition that triggers if the overall response rate of your application falls below five requests per minute for five minutes: # Low throughput resource \"newrelic_alert_condition\" \"throughput_web\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"Low Throughput (Web)\" type = \"apm_app_metric\" entities = [data.newrelic_entity.example_app.application_id] metric = \"throughput_web\" condition_scope = \"application\" # Define a critical alert threshold that will # trigger after 5 minutes below 5 requests per minute. term { priority = \"critical\" duration = 5 operator = \"below\" threshold = \"5\" time_function = \"all\" } } Copy This type of alert is useful when you expect a constant baseline of traffic throughout the day — a drop off in traffic can indicate a problem. Errors If your application's error rate spikes, you need to know about it. Create a newrelic_alert_condition that triggers if your application's error rate rises above 5% for five minutes: # Error percentage resource \"newrelic_alert_condition\" \"error_percentage\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"High Error Percentage\" type = \"apm_app_metric\" entities = [data.newrelic_entity.example_app.application_id] metric = \"error_percentage\" runbook_url = \"https://www.example.com\" condition_scope = \"application\" # Define a critical alert threshold that will trigger after 5 minutes above a 5% error rate. term { duration = 5 operator = \"above\" threshold = \"5\" time_function = \"all\" } } Copy Saturation Saturation represents how \"full\" your service is and can take many forms, such as CPU time, memory allocation, or queue depth. In this example, assume you already have a New Relic Infrastructure agent installed on the hosts serving your application, and you want to configure an alert for when CPU utilization spikes above a certain threshold: # High CPU usage resource \"newrelic_infra_alert_condition\" \"high_cpu\" { policy_id = newrelic_alert_policy.golden_signal_policy.id name = \"High CPU usage\" type = \"infra_metric\" event = \"SystemSample\" select = \"cpuPercent\" comparison = \"above\" runbook_url = \"https://www.example.com\" where = \"(`applicationId` = '${data.newrelic_entity.example_app.application_id}')\" # Define a critical alert threshold that will trigger after 5 minutes above 90% CPU utilization. 
critical { duration = 5 value = 90 time_function = \"all\" } } Copy For the Infrastructure alert, you created a newrelic_infra_alert_condition that triggers if the aggregate CPU usage on these hosts rises above 90% for five minutes. Step 5 of 5 Get notified when an alert triggers Now that you've configured some important alert conditions, add a notification channel to your alert policy to ensure the proper folks get notified when an alert triggers. To do so, use a newrelic_alert_channel. To begin, create an email notification channel to send alert notifications to your email. Use this when you want to notify a specific person or team when alerts are triggered: resource \"newrelic_alert_channel\" \"team_email\" { name = \"example\" type = \"email\" config { recipients = \"yourawesometeam@example.com\" include_json_attachment = \"1\" } } Copy If you want to specify multiple recipients, use a comma-delimited list of emails. Finally, to apply the notification channel to your alert policy, create a newrelic_alert_policy_channel: resource \"newrelic_alert_policy_channel\" \"golden_signals\" { policy_id = newrelic_alert_policy.golden_signal_policy.id channel_ids = [newrelic_alert_channel.team_email.id] } Copy A newrelic_alert_policy_channel links the notification channel you just created to your alert policy. To finalize your golden signal alerts configuration, run terraform apply one last time to make sure all of your configured resources are up to date. Extra Credit newrelic_alert_channel supports several types of notification channels, including email, Slack, and PagerDuty. If you want to explore further, try creating an alert channel for a second channel type, such as Slack (a PagerDuty variant is sketched after the conclusion below): # Slack notification channel resource \"newrelic_alert_channel\" \"slack_notification\" { name = \"slack-example\" type = \"slack\" config { # Use the URL provided in your New Relic Slack integration url = \"https://hooks.slack.com/services/XXXXXXX/XXXXXXX/XXXXXXXXXX\" channel = \"your-slack-channel-for-alerts\" } } Copy Before you apply this change, you need to add the New Relic Slack App to your Slack account and select a Slack channel to send the notification. With this new alert channel, triggered alerts send notifications to the Slack channel of your choice. Conclusion As your team evaluates the alerting system you’ve put in place, you’ll find that you may need to tweak configuration values, such as the alert threshold and duration. If you manage your Terraform project in a remote repository, you can submit a pull request so your team can review these changes alongside the rest of your code contributions. Tip You may also want to consider automating this process in your CI/CD pipeline. Use Terraform's recommended practices guide to learn more about their recommended workflow and how to evolve your provisioning practices. Congratulations! You're officially practicing observability-as-code. 
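As promised above, here is a PagerDuty variant of the same channel pattern. It is a sketch: the resource name is an assumption, and the service key is a placeholder you would copy from your PagerDuty service integration.

# PagerDuty notification channel
resource \"newrelic_alert_channel\" \"pagerduty_notification\" {
  name = \"pagerduty-example\"
  type = \"pagerduty\"

  config {
    # Placeholder: use the integration key from your PagerDuty service
    service_key = \"YOUR_PAGERDUTY_INTEGRATION_KEY\"
  }
}

To route alerts there as well, add newrelic_alert_channel.pagerduty_notification.id to the channel_ids list of the newrelic_alert_policy_channel resource you created earlier, then run terraform apply again.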
Review the New Relic Terraform provider documentation to learn how you can take your configuration to the next level.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 224.79312, + "_score": 219.19783, "_version": null, "_explanation": null, "sort": null, @@ -53450,7 +53396,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 209.0588, + "_score": 207.38963, "_version": null, "_explanation": null, "sort": null, @@ -53484,7 +53430,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 178.21005, + "_score": 177.26266, "_version": null, "_explanation": null, "sort": null, @@ -53514,7 +53460,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 174.59943, + "_score": 173.36263, "_version": null, "_explanation": null, "sort": null, @@ -53554,7 +53500,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 532.0009, + "_score": 525.3327, "_version": null, "_explanation": null, "sort": null, @@ -53591,7 +53537,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 446.42773, + "_score": 440.508, "_version": null, "_explanation": null, "sort": null, @@ -53629,7 +53575,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 226.74731, + "_score": 223.68562, "_version": null, "_explanation": null, "sort": null, @@ -53665,7 +53611,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.92932, + "_score": 188.04886, "_version": null, "_explanation": null, "sort": null, @@ -53700,7 +53646,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 132.5083, + "_score": 125.69632, "_version": null, "_explanation": null, "sort": null, @@ -53736,7 +53682,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 453.2217, + "_score": 448.534, "_version": null, "_explanation": null, "sort": null, @@ -53771,7 +53717,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 381.1413, + "_score": 376.22098, "_version": null, "_explanation": null, "sort": null, @@ -53809,7 +53755,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 335.4368, + "_score": 330.31564, "_version": null, "_explanation": null, "sort": null, @@ -53850,7 +53796,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 323.09326, + "_score": 319.63602, "_version": null, "_explanation": null, "sort": null, @@ -53886,7 +53832,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 302.724, + "_score": 298.96582, "_version": null, "_explanation": null, "sort": null, @@ -53899,54 +53845,6 @@ } ], "/port-monitoring/650d963a-8568-49a8-81d4-fc226f12ec10": [ - { - "sections": [ - "Alerts for infrastructure: Add, edit, or view host alert information", - "Create alert conditions for infrastructure", - "Important", - "Other infrastructure alert condition methods", - "Use the Alerts UI", - "Use the Infrastructure UI", - "Use infrastructure settings for integrations", - "Tip", - "View host alert events", - "Update or delete host alert information", - "Use New Relic Alerts to monitor your entire 
infrastructure", - "Add a description", - "Add or edit a runbook URL", - "Violation time limit for violations", - "Alert conditions that generate too-long NRQL queries" - ], - "title": "Alerts for infrastructure: Add, edit, or view host alert information", - "type": "docs", - "tags": [ - "Infrastructure alert conditions", - "Infrastructure alerts", - "Infrastructure" - ], - "external_id": "00207a1020aa29ea6d5d5bbb8e806a50a5966f80", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/new-relic-infrastructure/infrastructure-alert-conditions/infrastructure-alerts-add-edit-or-view-host-alert-information/", - "published_at": "2022-02-14T10:16:12Z", - "updated_at": "2022-02-14T10:16:12Z", - "document_type": "page", - "popularity": 1, - "body": "With New Relic's infrastructure monitoring, you can create alert conditions directly within the context of what you're currently monitoring with New Relic. For example, if you're monitoring a filter set and notice a problem, you can create an alert directly, and you don't need to recreate it from Alerts. To create the alert, select your filter set immediately, and tailor the alert condition directly from the chart you're viewing. This helps you proactively manage and monitor the alerting system for your environment. Any alert violations will be created per entity within the filter set. Want to try out alert conditions with our infrastructure agent? Create a New Relic account for free! No credit card required. Create alert conditions for infrastructure Alert conditions apply to alert policies. You can select an existing policy or create a new policy with email notifications from the Infrastructure monitoring UI. If you want to use other types of notification channels, create a new policy from within the Alerts UI. Important The Infrastructure REST API has a limit of 3,700 alert conditions, including both active and disabled conditions. The API, whether used directly or via the UI, will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Type a meaningful condition name. Select the Alert type, or refer to the examples to decide which type to select. Create individual filters, or copy all the filters from a filter set to identify the hosts that you want the alert condition to use. Important For more information about the rules behind filters, see Filter set logic. Define the Critical (required) and Warning (optional, if available) thresholds for triggering the alert notification. Optional: To create the condition criteria proactively but not receive alert notifications at this time, turn off the Enabled checkbox option. Select an existing policy for the new condition. OR Select the option to create a new policy and identify the email for alert notifications. Optional: Add a runbook url. Optional: Set Violation time limit for violations (this defaults to 24 hours). Select Create. Important If New Relic hasn't received a cloud integration service's attribute in the past 60 minutes, we refer to this as a \"silent attribute,\" and it won't be available to use as an alert condition in the UI. In this situation, you can use the API to create alert conditions for silent attributes. 
Other infrastructure alert condition methods You can also use these other methods to create an infrastructure alert condition: Use the Alerts UI Go to one.newrelic.com > Alerts & AI > Alerts > Alert policies > New alert policy > Create new condition, then select Infrastructure as the product. Use the Infrastructure UI Go to one.newrelic.com > Infrastructure. Select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Use infrastructure settings for integrations Tip Use this method to create an alert condition for infrastructure integrations. Go to one.newrelic.com > Infrastructure > Settings > Alerts, and then click Create alert condition. Name and describe the alert condition. Click the Integrations alert type, and then select the integration data source you'd like to use. Use the Filter entities dropdown to limit your condition to specific attributes. Use the Define thresholds dropdowns to define your condition's thresholds, and then click Create. The configuration settings are optional. You can always update them later. View host alert events Anyone included in the policy's notification channels receive alert notifications directly. In addition, anyone with permissions for your New Relic account can view Infrastructure alert incidents and individual violations through the user interface. Go to one.newrelic.com > Infrastructure > Events. To change the hosts or time frame, use the search window, Filter set, or Time functions. From the Events list, select the alert violation. To view detailed information in Alerts about the selected violation, select the link. Update or delete host alert information To edit, disable (or re-enable), or delete host alert information: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Use the search window or Select all checkbox to locate one or more alert conditions. Select any of the available functions to edit, disable, enable, or delete the selected conditions. Use New Relic Alerts to monitor your entire infrastructure New Relic Alerts provides a single, coordinated alerting tool across all of your New Relic products. This allows you to manage alert policies and conditions that focus on the metrics for entities that you care about the most, such as Docker containers, JVMs, and more. Alert features Features in Infrastructure Alert conditions Create: Use the Infrastructure UI. View, change, disable (or re-enable), or delete: Use the Infrastructure Settings > Alerts UI. Information on alerts View summary information about events: Use the Infrastructure Events UI. View detailed information about alert incidents or individual violations: Use the Alerts UI or the notification channel integrated with the associated policy. Alert policies View, add, change, disable, or delete: For policies with a variety of notification channels: Use the Alerts UI. For policies only needing email notifications: Go to one.newrelic.com > Infrastructure > Settings > Alerts > Create a new policy, and add one or more email addresses as needed. Add host conditions to an existing policy: Use the Infrastructure UI. Notification channels To view, add, change, or delete available notification options: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Search for the condition or policy name. From the list of conditions, select the policy link to view notification channel information in the Alerts UI. 
Add a description The use of the Description field is available for these alert condition types: NRQL conditions: add a description using the NerdGraph API. Infrastructure conditions: add a description using the UI or the REST API. The text you place in an alert condition's Description field is passed downstream to associated violations and notifications. A description can be used for several purposes, including: Capturing the reason for the alert condition. Defining the signal being monitored. Defining next steps. Add metadata to downstream systems. You can use template substitution to insert values from the attributes in the associated violation event. The template format is {{attributeName}}. For the attributes you can use when creating a description, see Violation event attributes. One available attribute is the special {{tag.*}} attribute. This attribute prefix is used to access any of the tag values that are included with the target signal, or any of the entity tags that are associated with the target signal. If there are entity tags associated with your violation, then they can be accessed using the entity tag name. An example of this would be {{tag.aws.awsRegion}}. When entity tags are available to use, you see them included with the violation, and displayed when you view the violations in an incident. This field has a maximum character size of 4,000. Add or edit a runbook URL The alert condition creation process includes an option for setting a URL for runbook instructions. This lets you link to information or standard procedures for handling a violation. Before adding or updating the link, make sure you use a valid URL. To add, update, or delete an alert condition's runbook URL: Select an alert condition, and make changes to the Runbook URL link. Save the condition. In order to be saved, the URL must be a valid URL. Violation time limit for violations The violation time limit allows you to define a time period after which violations will be force-closed. By default, violation time limit is 24 hours. To add or update an alert condition's violation time limit: Select an alert condition, and make changes to the violation time limit. Save the condition. Alert conditions that generate too-long NRQL queries Alert conditions created for infrastructure rely on behind-the-scenes NRQL queries, and NRQL queries have a 4096-character limit. This means that if your condition generates a very complex NRQL query that filters on many elements (for example, including many hosts or many tags), it will exceed this limit and display an error message saying that the condition failed. To solve this problem, reduce the number of elements you are using in your alert condition. For example: Problem Solution Hosts If you entered a large number of hosts that caused the condition to fail, reduce the number of hosts. Use substrings to target hosts. For example, instead of targeting prod-host-01, prod-host-02, and prod-host-03, just target all hosts with prod-host-0 in the name. Entities Edit your alert condition to target specific attributes that apply to the entities you're trying to target. Create custom attributes for the entities you want to target, and use those attributes in your alert condition. 
For more information, see Best practices for filtering in infrastructure alerts in New Relic's Explorers Hub.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.161606, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Alerts for infrastructure: Add, edit, or view host alert information", - "sections": "Alerts for infrastructure: Add, edit, or view host alert information", - "tags": "Infrastructure alert conditions", - "body": ", will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network" - }, - "id": "6043fa3428ccbc401d2c60b9" - }, { "sections": [ "Default infrastructure monitoring data", @@ -53985,7 +53883,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.0192, + "_score": 83.5758, "_version": null, "_explanation": null, "sort": null, @@ -54034,7 +53932,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.977234, + "_score": 80.72998, "_version": null, "_explanation": null, "sort": null, @@ -54044,6 +53942,54 @@ }, "id": "61b9389528ccbcb4d396ee5e" }, + { + "sections": [ + "Alerts for infrastructure: Add, edit, or view host alert information", + "Create alert conditions for infrastructure", + "Important", + "Other infrastructure alert condition methods", + "Use the Alerts UI", + "Use the Infrastructure UI", + "Use infrastructure settings for integrations", + "Tip", + "View host alert events", + "Update or delete host alert information", + "Use New Relic Alerts to monitor your entire infrastructure", + "Add a description", + "Add or edit a runbook URL", + "Violation time limit for violations", + "Alert conditions that generate too-long NRQL queries" + ], + "title": "Alerts for infrastructure: Add, edit, or view host alert information", + "type": "docs", + "tags": [ + "Infrastructure alert conditions", + "Infrastructure alerts", + "Infrastructure" + ], + "external_id": "00207a1020aa29ea6d5d5bbb8e806a50a5966f80", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/new-relic-infrastructure/infrastructure-alert-conditions/infrastructure-alerts-add-edit-or-view-host-alert-information/", + "published_at": "2022-02-14T10:16:12Z", + "updated_at": "2022-02-14T10:16:12Z", + "document_type": "page", + "popularity": 1, + "body": "With New Relic's infrastructure monitoring, you can create alert conditions directly within the context of what you're currently monitoring with New Relic. For example, if you're monitoring a filter set and notice a problem, you can create an alert directly, and you don't need to recreate it from Alerts. To create the alert, select your filter set immediately, and tailor the alert condition directly from the chart you're viewing. This helps you proactively manage and monitor the alerting system for your environment. Any alert violations will be created per entity within the filter set. Want to try out alert conditions with our infrastructure agent? Create a New Relic account for free! No credit card required. Create alert conditions for infrastructure Alert conditions apply to alert policies. You can select an existing policy or create a new policy with email notifications from the Infrastructure monitoring UI. 
If you want to use other types of notification channels, create a new policy from within the Alerts UI. Important The Infrastructure REST API has a limit of 3,700 alert conditions, including both active and disabled conditions. The API, whether used directly or via the UI, will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Type a meaningful condition name. Select the Alert type, or refer to the examples to decide which type to select. Create individual filters, or copy all the filters from a filter set to identify the hosts that you want the alert condition to use. Important For more information about the rules behind filters, see Filter set logic. Define the Critical (required) and Warning (optional, if available) thresholds for triggering the alert notification. Optional: To create the condition criteria proactively but not receive alert notifications at this time, turn off the Enabled checkbox option. Select an existing policy for the new condition. OR Select the option to create a new policy and identify the email for alert notifications. Optional: Add a runbook URL. Optional: Set Violation time limit for violations (this defaults to 24 hours). Select Create. Important If New Relic hasn't received a cloud integration service's attribute in the past 60 minutes, we refer to this as a \"silent attribute,\" and it won't be available to use as an alert condition in the UI. In this situation, you can use the API to create alert conditions for silent attributes. Other infrastructure alert condition methods You can also use these other methods to create an infrastructure alert condition: Use the Alerts UI Go to one.newrelic.com > Alerts & AI > Alerts > Alert policies > New alert policy > Create new condition, then select Infrastructure as the product. Use the Infrastructure UI Go to one.newrelic.com > Infrastructure. Select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Use infrastructure settings for integrations Tip Use this method to create an alert condition for infrastructure integrations. Go to one.newrelic.com > Infrastructure > Settings > Alerts, and then click Create alert condition. Name and describe the alert condition. Click the Integrations alert type, and then select the integration data source you'd like to use. Use the Filter entities dropdown to limit your condition to specific attributes. Use the Define thresholds dropdowns to define your condition's thresholds, and then click Create. The configuration settings are optional. You can always update them later. View host alert events Anyone included in the policy's notification channels receives alert notifications directly. In addition, anyone with permissions for your New Relic account can view Infrastructure alert incidents and individual violations through the user interface. Go to one.newrelic.com > Infrastructure > Events. To change the hosts or time frame, use the search window, Filter set, or Time functions. From the Events list, select the alert violation. To view detailed information in Alerts about the selected violation, select the link. 
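If you prefer to manage these conditions as code, the Critical and Warning thresholds described above map directly onto the New Relic Terraform provider used earlier in this file. A minimal sketch, where the event, attribute, threshold values, and policy reference are illustrative assumptions rather than values from this doc:

resource \"newrelic_infra_alert_condition\" \"disk_usage\" {
  policy_id  = newrelic_alert_policy.golden_signal_policy.id
  name       = \"High disk usage\"
  type       = \"infra_metric\"
  event      = \"StorageSample\"
  select     = \"diskUsedPercent\"
  comparison = \"above\"

  # Warning is optional; Critical is required, mirroring the UI.
  warning {
    duration      = 10
    value         = 80
    time_function = \"all\"
  }

  critical {
    duration      = 10
    value         = 90
    time_function = \"all\"
  }
}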
Update or delete host alert information To edit, disable (or re-enable), or delete host alert information: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Use the search window or Select all checkbox to locate one or more alert conditions. Select any of the available functions to edit, disable, enable, or delete the selected conditions. Use New Relic Alerts to monitor your entire infrastructure New Relic Alerts provides a single, coordinated alerting tool across all of your New Relic products. This allows you to manage alert policies and conditions that focus on the metrics for entities that you care about the most, such as Docker containers, JVMs, and more. Alert features Features in Infrastructure Alert conditions Create: Use the Infrastructure UI. View, change, disable (or re-enable), or delete: Use the Infrastructure Settings > Alerts UI. Information on alerts View summary information about events: Use the Infrastructure Events UI. View detailed information about alert incidents or individual violations: Use the Alerts UI or the notification channel integrated with the associated policy. Alert policies View, add, change, disable, or delete: For policies with a variety of notification channels: Use the Alerts UI. For policies only needing email notifications: Go to one.newrelic.com > Infrastructure > Settings > Alerts > Create a new policy, and add one or more email addresses as needed. Add host conditions to an existing policy: Use the Infrastructure UI. Notification channels To view, add, change, or delete available notification options: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Search for the condition or policy name. From the list of conditions, select the policy link to view notification channel information in the Alerts UI. Add a description The use of the Description field is available for these alert condition types: NRQL conditions: add a description using the NerdGraph API. Infrastructure conditions: add a description using the UI or the REST API. The text you place in an alert condition's Description field is passed downstream to associated violations and notifications. A description can be used for several purposes, including: Capturing the reason for the alert condition. Defining the signal being monitored. Defining next steps. Add metadata to downstream systems. You can use template substitution to insert values from the attributes in the associated violation event. The template format is {{attributeName}}. For the attributes you can use when creating a description, see Violation event attributes. One available attribute is the special {{tag.*}} attribute. This attribute prefix is used to access any of the tag values that are included with the target signal, or any of the entity tags that are associated with the target signal. If there are entity tags associated with your violation, then they can be accessed using the entity tag name. An example of this would be {{tag.aws.awsRegion}}. When entity tags are available to use, you see them included with the violation, and displayed when you view the violations in an incident. This field has a maximum character size of 4,000. Add or edit a runbook URL The alert condition creation process includes an option for setting a URL for runbook instructions. This lets you link to information or standard procedures for handling a violation. Before adding or updating the link, make sure you use a valid URL. 
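For reference, the description, runbook URL, and violation time limit discussed above also map onto arguments of the same Terraform resource used earlier in this file. This is a sketch only: argument names such as description and violation_close_timer, and the {{entityName}} template attribute, are assumptions to verify against your provider version before use.

resource \"newrelic_infra_alert_condition\" \"high_cpu\" {
  policy_id   = newrelic_alert_policy.golden_signal_policy.id
  name        = \"High CPU usage\"
  type        = \"infra_metric\"
  event       = \"SystemSample\"
  select      = \"cpuPercent\"
  comparison  = \"above\"
  runbook_url = \"https://example.com/runbooks/high-cpu\"

  # Template substitution pulls attributes from the violation event.
  description = \"CPU on {{entityName}} exceeded 90% for 5 minutes.\"

  # Force-close violations after 12 hours (the default is 24).
  violation_close_timer = 12

  critical {
    duration      = 5
    value         = 90
    time_function = \"all\"
  }
}

The steps below cover the same edits through the UI.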
To add, update, or delete an alert condition's runbook URL: Select an alert condition, and make changes to the Runbook URL link. Save the condition. In order to be saved, the URL must be a valid URL. Violation time limit for violations The violation time limit allows you to define a time period after which violations will be force-closed. By default, violation time limit is 24 hours. To add or update an alert condition's violation time limit: Select an alert condition, and make changes to the violation time limit. Save the condition. Alert conditions that generate too-long NRQL queries Alert conditions created for infrastructure rely on behind-the-scenes NRQL queries, and NRQL queries have a 4096-character limit. This means that if your condition generates a very complex NRQL query that filters on many elements (for example, including many hosts or many tags), it will exceed this limit and display an error message saying that the condition failed. To solve this problem, reduce the number of elements you are using in your alert condition. For example: Problem Solution Hosts If you entered a large number of hosts that caused the condition to fail, reduce the number of hosts. Use substrings to target hosts. For example, instead of targeting prod-host-01, prod-host-02, and prod-host-03, just target all hosts with prod-host-0 in the name. Entities Edit your alert condition to target specific attributes that apply to the entities you're trying to target. Create custom attributes for the entities you want to target, and use those attributes in your alert condition. For more information, see Best practices for filtering in infrastructure alerts in New Relic's Explorers Hub.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 80.605484, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Alerts for infrastructure: Add, edit, or view host alert information", + "sections": "Alerts for infrastructure: Add, edit, or view host alert information", + "tags": "Infrastructure alert conditions", + "body": ", will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. 
To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network" + }, + "id": "6043fa3428ccbc401d2c60b9" + }, { "sections": [ "AWS VPC", @@ -54072,7 +54018,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.12404, + "_score": 71.1505, "_version": null, "_explanation": null, "sort": null, @@ -54110,7 +54056,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.08756, + "_score": 71.11451, "_version": null, "_explanation": null, "sort": null, @@ -54153,7 +54099,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -54195,7 +54141,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -54237,7 +54183,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -54279,7 +54225,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -54321,7 +54267,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -54365,7 +54311,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -54407,7 +54353,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -54449,7 +54395,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -54491,7 +54437,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -54530,7 +54476,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -54567,7 +54513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.995674, + "_score": 126.224014, "_version": null, "_explanation": null, "sort": null, @@ -54609,7 +54555,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.49199, + "_score": 83.79138, "_version": null, "_explanation": null, "sort": null, @@ -54654,7 +54600,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.48265, + "_score": 83.78378, "_version": null, "_explanation": null, "sort": null, @@ -54692,7 +54638,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 63.853985, + "_score": 
62.686325, "_version": null, "_explanation": null, "sort": null, @@ -54726,7 +54672,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 61.84263, + "_score": 57.944366, "_version": null, "_explanation": null, "sort": null, @@ -54739,32 +54685,6 @@ } ], "/echo/31e4cb0c-fdb0-4bdd-9631-1072be75bd8b": [ - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.38019, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "sections": [ "Mux", @@ -54796,7 +54716,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.697266, + "_score": 112.87141, "_version": null, "_explanation": null, "sort": null, @@ -54837,7 +54757,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 112.6965, + "_score": 112.87066, "_version": null, "_explanation": null, "sort": null, @@ -54847,6 +54767,32 @@ }, "id": "61566dd8e7b9d279cf8de386" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 110.14922, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Install the Go agent in GAE flexible environment", @@ -54878,7 +54824,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.33989, + "_score": 65.31719, "_version": null, "_explanation": null, "sort": null, @@ -54909,7 +54855,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.958164, + "_score": 58.9981, "_version": null, "_explanation": null, "sort": null, @@ -54957,7 +54903,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 834.5066, + "_score": 790.93164, "_version": null, "_explanation": null, "sort": null, @@ -55005,7 +54951,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 703.90393, + "_score": 700.6033, "_version": null, "_explanation": null, "sort": null, @@ -55059,7 +55005,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 494.56628, + "_score": 468.83264, "_version": null, "_explanation": null, "sort": null, @@ -55099,7 +55045,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 432.34802, + "_score": 431.51105, "_version": null, "_explanation": null, "sort": null, @@ -55148,7 +55094,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 357.73425, + "_score": 355.89273, "_version": null, "_explanation": null, "sort": null, @@ -55203,7 +55149,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 187.56267, + "_score": 184.03421, "_version": null, "_explanation": null, "sort": null, @@ -55234,7 +55180,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 136.27637, + "_score": 135.14978, "_version": null, "_explanation": null, "sort": null, @@ -55276,7 +55222,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 102.33976, + "_score": 102.59363, "_version": null, "_explanation": null, "sort": null, @@ -55316,7 +55262,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.74612, + "_score": 101.30923, "_version": null, "_explanation": null, "sort": null, @@ -55329,40 +55275,45 @@ "id": "6045248b196a67f158960f1b" }, { - "image": "https://docs.newrelic.com/static/693426d805c82f1d9155cd04b116c36e/d9199/new-relic-product-relationships.png", - "url": "https://docs.newrelic.com/docs/style-guide/capitalization/product-capability-feature-usage/", "sections": [ - "Product, capability, and feature capitalization and usage", - "Important", - "Trademark guidelines", - "When to use title case", - "Examples", - "What not to capitalize", - "What to avoid", - "Copyright and trademark notices", - "Relationships between products, features, and capabilities" + "Server-side agent configuration", + "Requirements", + "Centralization and security", + "Server-side configuration precedence", + "Configure from the UI", + "Configure from NerdGraph API", + "View or change server-side configuration settings", + "Disable server-side configuration" ], - "published_at": 
"2022-02-14T07:10:58Z", - "title": "Product, capability, and feature capitalization and usage", - "updated_at": "2022-02-13T14:59:44Z", + "title": "Server-side agent configuration", "type": "docs", - "external_id": "eaaecce5ea26ad32376e01d481d138326154d094", + "tags": [ + "Configuration", + "Manage APM agents", + "Agents" + ], + "external_id": "34c443ebbf3a304d6aeb3e3f012b4ddd11b06765", + "image": "", + "url": "https://docs.newrelic.com/docs/apm/agents/manage-apm-agents/configuration/server-side-agent-configuration/", + "published_at": "2022-02-15T15:45:40Z", + "updated_at": "2021-12-25T19:39:51Z", "document_type": "page", "popularity": 1, - "body": "This page is the definitive resource for capitalizing products, features, and capabilities for use by all New Relic content creators. These guidelines have been vetted by members of legal, content and product marketing, and docs, as well as leaders from around New Relic. Visit the word-nerds slack channel if you have questions about or additions to the list. In general, the only things that we use title case for are our company name, product/platform name, and a few capabilities and integrations that require it for legal reasons. The following sections also call out first and subsequent uses of terms. First use refers to the first mention in the body copy. It's okay to use the subsequent versions in space-constrained areas such as titles, headers, tables, web navigation, the UI, social media posts, and so on. Do not use acronyms specific to or coined by New Relic externally; only use industry-recognized acronyms such as APM externally. Important Find capitalization guidelines for user types and product editions. Trademark guidelines New Relic trademarks are adjectives (brand names) modifying nouns (the generic product type). Do not make New Relic or its platform, capabilities, and so on possessive using apostrophes. For example, use the “functionality of New Relic One” or “the New Relic One functionality” instead of “New Relic One's functionality.” Do not pluralize New Relic or its platform, capabilities, and so on. Do not abbreviate or combine New Relic or its platform, capabilities, and so on. Do not hyphenate New Relic or its platform, capabilities, and so on, and do not allow them to break across a page line when used in text. Avoid confusion by clearly separating and identifying New Relic trademarks from other companies' names and/or trademarks. These trademark guidelines apply to other companies' trademarked names and products too. When to use title case You must use title case for our trademarked names including our company name plus our product/platform name and a few capability and integration names. Name What it is Use this Not this New Relic* our company First use: New Relic, Inc. (corporation/entity), New Relic® (printed assets), or New Relic (digital assets) Subsequent uses: New Relic, our company, we, or it Do not use: New Relic's, new relic, New relic, NR, their New Relic One our product/ our platform First use: New Relic One (docs, UI, titles) or New Relic One observability platform (marketing content) Subsequent uses: New Relic One or New Relic One platform Note: New Relic One observability platform is recommended for marketing content where users might not be familiar with our product. 
Do not use: New Relic One's, New Relic one, NR1 FutureStack* { Future } Stack* our annual user group conference First use: FutureStack® or { Future } Stack® (printed assets), or FutureStack or { Future } Stack (digital assets) Subsequent uses: FutureStack Do not use: Future Stack, Futurestack, Future stack NerdGraph* our GraphQL API First use: NerdGraph® (printed assets) or NerdGraph (digital assets) Subsequent uses: NerdGraph Do not use: Nerd Graph, Nerdgraph, nerdgraph, nerd graph Nerdlet* component of New Relic One apps; a specific UI view represented by a React JavaScript package First use: Nerdlet® (printed assets) or Nerdlet (digital assets) Subsequent uses: Nerdlet Do not use: nerdlet, NerdLet Nerdpack* component of New Relic One apps; the package containing all the files needed by that app First use: Nerdpack® (printed assets) or Nerdpack (digital assets) Subsequent uses: Nerdpack Do not use: nerdpack, NerdPack, Nerd Pack, nerd pack NerdStorage* component of New Relic One apps; used to store and retrieve simple sets of data First use: NerdStorage® (printed assets) or NerdStorage (digital assets) Subsequent uses: NerdStorage Do not use: Nerdstorage, nerdstorage, Nerd Storage, Nerd storage, nerd storage New Relic CodeStream IDE extension that integrates with New Relic One New Relic CodeStream (for the New Relic integration with CodeStream) or CodeStream (for just the CodeStream app) Do not use: New Relic CodeStream's, New Relic Code Stream, Code Stream New Relic Explorer capability of New Relic One First use: New Relic Explorer Subsequent uses: New Relic Explorer Describing actions in the UI: Explorer Do not use: New Relic Explorer's, Explorer (okay when directing what to select in the UI), explorer New Relic Infinite Tracing* our fully-managed, tail-based, distributed tracing solution First use: New Relic Infinite Tracing® (printed assets) or New Relic Infinite Tracing (digital assets) Subsequent uses: Infinite Tracing Do not use: Infinite tracing, infinite tracing, New Relic Edge with Infinite Tracing New Relic Instant Observability ecosystem of quickstarts for New Relic One First use: New Relic Instant Observability or New Relic Instant Observability (I/O) Subsequent uses: Instant Observability or New Relic I/O (avoid using the acronym externally, if possible) Do not use: New Relic instant observability, instant observability, NRIO, IO, I/O New Relic Lookout capability of New Relic One First use: New Relic Lookout Subsequent uses: New Relic Lookout Describing actions in the UI: Lookout Do not use: New Relic Lookout's, Lookout (okay when directing what to select in the UI), lookout New Relic Navigator capability of New Relic One First use: New Relic Navigator Subsequent uses: New Relic Navigator Describing actions in the UI: Navigator Do not use: New Relic Navigator's, Navigator (okay when directing what to select in the UI), navigator * Trademarked Examples New Relic is a registered trademark of New Relic, Inc. It was founded in 2008. We call our employees Relics. The New Relic support team can answer all of your questions about New Relic One. They're happy to help. The New Relic One observability platform lets you ingest data from practically any source. New Relic One gives you access to our curated UI experiences like application performance monitoring, browser monitoring, mobile monitoring, and more. 
Optimize code performance and feature planning with access to telemetry data from production and pre-production environments directly in your IDE via the New Relic CodeStream integration. New Relic Edge with Infinite Tracing is a fully managed, cloud-based solution. Infinite Tracing can analyze 100% of your trace data and choose the most actionable data. What not to capitalize Do not capitalize our capability and feature names (what you get with our platform) unless they begin a sentence (and then only capitalize the first word) or are included in the table above. If a capability or feature name includes the name of a trademarked product, then only capitalize the trademarked name (for example, Pixie or Kubernetes). Feature and capability defined: A feature is an individual experience or element of functionality in the New Relic One platform or a New Relic One capability. A capability is a collection of features that enable a customer to achieve a use case. A capability is considered a superset of features and often tends to be an outside-in term that customers associate with an existing category such as application performance monitoring, applied intelligence, infrastructure monitoring, and log management. In other words, capabilities are the things we'd treat as SKUs if we sold them all separately. Notes about features and capabilities: These are largely internal terms used so that we can discuss New Relic and its structure more clearly. For public resources, we should attempt to avoid these terms and their distinctions and simply talk about how something works. Note that this use of “capability” is different from how we define “capability” in the user management space. View a diagram of the relationship between our product, features, and capabilities. 
Name What it is Use this Not this alerts capability of New Relic One; detection and notification of issues alerts Do not use: Alerts anomaly detection feature of the applied intelligence capability in New Relic One that helps distinguish between typical and atypical system performance anomaly detection Do not use: Anomaly Detection, Anomaly detection application performance monitoring capability of New Relic One; using real-time data to track the uptime and reliability of an application First use: application performance monitoring (APM) Subsequent uses: application performance monitoring, APM, or application monitoring Do not use: Application Performance Management, Application Performance Monitoring, Application Monitoring applied intelligence capability of New Relic One; our AIOps solution; machine learning engine that reduces alert noise, correlates incidents, and automatically detects anomalies applied intelligence Do not use: Applied Intelligence, Applied intelligence, AI, AIOps automap feature of New Relic One; automatically displays relationships between entities in topology view automap Do not use: auto map, Auto Map, Auto map auto-telemetry with Pixie Pixie integration with New Relic One First use: auto-telemetry with Pixie Subsequent uses: auto-telemetry with Pixie, the Pixie integration with New Relic One, our Pixie integration, or the integration with Pixie Do not use: Pixie (okay if referring to px.dev and the open-source Pixie project), Pixie's, Auto-telemetry with Pixie browser monitoring capability of New Relic One; our real-user monitoring (RUM) solution along with mobile monitoring browser monitoring Do not use: Browser Monitoring, Browser monitoring containers a method to package software for deployment containers Do not use: Containers dashboards capability of New Relic One that uses NRQL to build custom visualizations dashboards Do not use: Dashboards data explorer feature of New Relic One; navigating data in New Relic One without NRQL know-how data explorer Do not use: Data Explorer, Data explorer data ingest bringing metrics, events, logs, and traces (MELT) data into New Relic One data ingest Do not use: Data Ingest, Data ingest digital experience monitoring a combo of New Relic One front-end monitoring capabilities (browser, mobile, synthetics) First use: digital experience monitoring (DEM) Subsequent uses: digital experience monitoring or DEM Do not use: Digital Experience Monitoring, Digital experience monitoring, digital monitoring distributed tracing feature of New Relic One; a solution for observing service requests as they flow through a distributed system distributed tracing Do not use: Distributed Tracing, Distributed tracing errors inbox capability of New Relic One; our error tracking solution for triaging and resolving full-stack errors errors inbox Do not use: Errors Inbox, Errors inbox event correlation feature of the applied intelligence capability in New Relic One that automatically groups alerts to decrease noise event correlation Do not use: Event Correlation, Event correlation incident intelligence feature of the applied intelligence capability in New Relic One that correlates incidents and offers suggested responders incident intelligence Do not use: Incident Intelligence, Incident intelligence infrastructure monitoring capability of New Relic One that collects performance data on hosts and servers (IT infrastructure) to understand health First use: infrastructure monitoring Subsequent uses: infrastructure monitoring, infra monitoring, or infra 
(for space-constrained areas only) Do not use: Infrastructure Monitoring, Infrastructure monitoring Kubernetes cluster explorer feature of the Kubernetes monitoring capability that provides a multi-dimensional representation of a Kubernetes cluster and enables teams to drill down into Kubernetes data Kubernetes cluster explorer Do not use: Kubernetes Cluster Explorer, kubernetes cluster explorer Kubernetes monitoring capability of New Relic One; form of reporting that helps with proactive management of clusters Kubernetes monitoring Do not use: Kubernetes Monitoring, kubernetes monitoring microservices modern application architecture (vs. monolith) microservices Do not use: micro services, Micro Services, Microservices integrations solutions that integrate with/gather data from third parties; all our integrations can be found as quickstarts in New Relic Instant Observability integrations Do not use: Integrations log management capability of New Relic One; collecting, formatting, and analyzing log data to optimize systems First use: log management Subsequent uses: log management or logs Do not use: Log Management, Log management, Logs logs in context feature of the log management capability in New Relic One; tracing logs throughout a complex service logs in context Do not use: Logs in Context, Logs in context metrics, events, logs, and traces what you monitor for full-stack observability First use: metrics, events, logs, and traces or metrics, events, logs, and traces (MELT) Subsequent uses: metrics, events, logs, and traces or MELT Do not use: Metrics, Events, Logs, and Traces mobile monitoring capability of New Relic One; our RUM solution along with browser monitoring mobile monitoring Do not use: Mobile Monitoring, Mobile monitoring model performance monitoring capability of New Relic One; our solution for MLOps; observability for machine learning (ML) models in production model performance monitoring Do not use: Model Performance Monitoring, Model performance monitoring, ML model monitoring, ML model performance monitoring, MPM network performance monitoring capability of New Relic One; understanding how a network is experienced by users First use: network performance monitoring or network performance monitoring (NPM) Subsequent uses: network performance monitoring, NPM, or network monitoring Do not use: Network Performance Monitoring, Network performance monitoring, Network Monitoring, Network monitoring observability methodology for understanding a complex system First use: observability or observability (o11y) Subsequent uses: observability, o11y, full-stack observability, or end-to-end observability Do not use: Observability, O11y, Full-Stack Observability, Full-stack Observability, Full-stack observability query, queries, querying feature of New Relic One; NRQL- or PromQL-style way of asking bespoke questions of data query, queries, or querying Do not use: Query, Queries, Querying query builder feature of New Relic One; previously known as chart builder query builder Do not use: Query Builder, Query builder quickstarts feature of New Relic Instant Observability; pre-built open-source integrations that include dashboards and alerts quickstarts Do not use: quick starts, Quick Starts, QuickStarts, Quickstarts serverless monitoring capability of New Relic One for Lambda and serverless functions serverless monitoring Do not use: Serverless Monitoring, Serverless monitoring service maps feature of New Relic One; visual representation of a service service maps Do not use: Service Maps, 
Service maps synthetic monitoring capability of New Relic One; simulating users across geographies to identify bottlenecks or experience issues; aka synthetic tests for APIs or browsers First use: synthetic monitoring Subsequent uses: synthetic monitoring or synthetics or synthetic monitors Do not use: synthetics monitoring, Synthetic Monitoring, Synthetic monitoring If you don't see a feature or capability in one of the above tables, assume that it is not capitalized. Examples Application performance monitoring (APM) helps you instantly understand application performance, dependencies, and bottlenecks. APM gives you a complete view of your applications and operating environment. Covered entities can now send application, infrastructure, digital experience, and network monitoring data to New Relic One while maintaining HIPAA compliance. When you need to correlate log data with other telemetry data, enable logs in context in New Relic One. NRQL is a query language you can use to query the New Relic database. With a quickstart, you can quickly install dashboards, alerts, and other resources. What to avoid Avoid the use of our deprecated names such as old product SKUs. Name What it is Use this Not this Applied Intelligence formerly a separate product—now a capability of New Relic One applied intelligence Do not use: Applied Intelligence, AI, AIOps Full-Stack Observability formerly a separate product—now in lowercase, it describes an outcome of using New Relic One full-stack observability Do not use: Full-Stack Observability, Full-stack Observability, Full Stack Observability, full stack observability, FSO Telemetry Data Platform formerly a separate product—now part of New Relic One telemetry data platform (avoid this term altogether when possible) Do not use: Telemetry Data Platform, Telemetry data platform, TDP Examples Engineers can use applied intelligence to detect, diagnose, and mitigate problems more quickly and easily. A set of dashboards with data from all New Relic products gives you full-stack observability of your metrics, events, logs, and traces. Copyright and trademark notices Downloadable or printable documents that are available to the public—including customer-, partner-, and reseller-facing documents—require a copyright disclaimer in the footer for all registered and unregistered trademarks used within the document. In any instance where the registration marks are not used in downloadable/printable documents, include the following statement in the copyright area of the footer: © 2008-22 New Relic, Inc. All rights reserved. New Relic and the New Relic logo are registered trademarks of New Relic, Inc. All product and company names herein may be trademarks of their registered owners. Update the copyright year to reflect the current year. For purely internal documents, neither the copyright nor the trademark notices are required because we are not publishing the documents or putting third parties on notice. Instead, add the following disclaimer to the footer: New Relic confidential; for internal use only You should also add the word “internal” to the file name. Relationships between products, features, and capabilities This is not an exhaustive diagram, but it provides a model for how our features and capabilities fit together into our product.", + "body": "Server-side configuration transitions some core settings from your language agent's configuration file to the New Relic collector. 
Depending on the language agent, available settings may include: Transaction tracing SQL recording and logging SQL query plans Error collection Thread profiling Distributed tracing Cross application tracing Requirements Server-side configuration is not available for our C SDK agent or our PHP agent. Centralization and security Server-side configuration provides an easy way to manage the available settings from the New Relic server side. Centralized configuration settings can also help you ensure standards compliance and security management for your settings. Any change to a monitored application automatically applies to all APM agents monitoring that application, even if they run across multiple hosts. The agent still follows the local configurations for any settings that can't be set server-side. Server-side configuration precedence This feature provides the convenience of managing the available configuration settings directly from the New Relic side, without the need for deployments or restarts. Available options to change settings, and the order of precedence they take when you use environment variables or other configuration options, will depend on the language agent used. For more information about the hierarchy of settings, see the illustration for the specific agent: C SDK configuration: A hierarchy is not applicable, because configuration values come from API calls. Also, server-side configuration is not supported. However, you can change the app name from the UI or from the C SDK configuration settings. Go hierarchy Java hierarchy .NET hierarchy Node.js hierarchy PHP hierarchy (server-side configuration not supported) Python hierarchy Ruby hierarchy Configure from the UI The C SDK and PHP agent do not support server-side configuration. To enable server-side configuration settings for monitored apps from the UI: Go to one.newrelic.com and click APM. Click on your app. Then click Settings > Application > Server-side agent configuration. After you enable server-side configuration, you can view and change the available settings through the UI. Configure from NerdGraph API For how to enable this with NerdGraph, see the NerdGraph tutorial. View or change server-side configuration settings If you use server-side configuration, you must still include your license_key and app_name in the local config file. These settings are required for the agent to communicate with the New Relic collector. The C SDK and PHP agent do not support server-side configuration. To view or change the available server-side configuration settings through the UI for apps that use other New Relic agents: Go to one.newrelic.com and click APM. Click on your app. Then click Settings > Application > Server-side agent configuration. Once you set any of these options from the UI, they will override any conflicting options in the agent's configuration file. Disable server-side configuration Once you enable server-side configuration for an account, server-side configuration cannot be turned off without assistance from New Relic Support. 
This helps mitigate issues that could arise from conflicts with configuration settings if you need to transition back to a local configuration setup.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.197464, + "_score": 90.28703, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Product, capability, and feature capitalization and usage", - "sections": "Product, capability, and feature capitalization and usage", - "body": ", and capabilities. Name What it is Use this Not this alerts capability of New Relic One; detection and notification of issues alerts Do not use: Alerts anomaly detection feature of the applied intelligence capability in New Relic One that helps distinguish between typical and atypical system performance" + "title": "Server-side agent configuration", + "sections": "Server-side agent configuration", + "tags": "Configuration", + "body": " Distributed tracing Cross application tracing Requirements Server-side configuration is not available for our C SDK agent or our PHP agent. Centralization and security Server-side configuration provides an easy way to manage the available settings from the New Relic server side. Centralized configuration" }, - "id": "61fd071728ccbc7880c0c1b5" + "id": "617e649c196a67df22f7bb1c" } ], "/cakephp/64bbbba3-10ab-4f15-8151-5079cecda894": [ @@ -55397,7 +55348,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -55439,7 +55390,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -55481,7 +55432,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -55523,7 +55474,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -55562,7 +55513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -55606,7 +55557,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -55648,7 +55599,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -55690,7 +55641,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -55732,7 +55683,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -55771,7 +55722,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -55809,7 +55760,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", 
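For reference, here is a minimal sketch of the local settings that must remain in place when server-side configuration is enabled. It assumes a newrelic.yml in the Ruby/Java agent style; the exact file name, format, and key names vary by language agent, so treat it as illustrative only:

# Hypothetical minimal newrelic.yml; keep these keys locally even with server-side configuration enabled.
common: &default_settings
  license_key: 'YOUR_LICENSE_KEY'  # required for the agent to reach the New Relic collector
  app_name: 'My Application'       # required so reported data maps to the correct APM application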
"_type": "520d1d5d14cc8a32e600034c", - "_score": 319.28305, + "_score": 296.58286, "_version": null, "_explanation": null, "sort": null, @@ -55914,7 +55865,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 314.08, + "_score": 292.2414, "_version": null, "_explanation": null, "sort": null, @@ -55943,7 +55894,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.55774, + "_score": 269.04272, "_version": null, "_explanation": null, "sort": null, @@ -55988,7 +55939,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 269.03418, + "_score": 253.98067, "_version": null, "_explanation": null, "sort": null, @@ -56033,7 +55984,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 262.44315, + "_score": 246.80772, "_version": null, "_explanation": null, "sort": null, @@ -56075,7 +56026,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.21301, "_version": null, "_explanation": null, "sort": null, @@ -56117,7 +56068,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -56159,7 +56110,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -56201,7 +56152,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.2128, "_version": null, "_explanation": null, "sort": null, @@ -56240,7 +56191,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73055, "_version": null, "_explanation": null, "sort": null, @@ -56284,7 +56235,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.21301, "_version": null, "_explanation": null, "sort": null, @@ -56326,7 +56277,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -56368,7 +56319,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -56410,7 +56361,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.2128, "_version": null, "_explanation": null, "sort": null, @@ -56449,7 +56400,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73055, "_version": null, "_explanation": null, "sort": null, @@ -56499,7 +56450,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 615.3303, + "_score": 580.7268, "_version": null, "_explanation": null, "sort": null, @@ -56553,7 +56504,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 523.9817, + "_score": 494.63397, "_version": null, "_explanation": null, "sort": null, @@ -56595,7 +56546,7 @@ "info": "", "_index": 
"520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 431.46573, + "_score": 425.97406, "_version": null, "_explanation": null, "sort": null, @@ -56639,7 +56590,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 410.41974, + "_score": 407.1903, "_version": null, "_explanation": null, "sort": null, @@ -56687,7 +56638,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 395.4641, + "_score": 392.40762, "_version": null, "_explanation": null, "sort": null, @@ -56701,66 +56652,6 @@ } ], "/apache/ad5affab-545a-4355-ad48-cfd66e2fbf00": [ - { - "sections": [ - "Apache monitoring integration", - "Compatibility and requirements", - "Quick start", - "Install and activate", - "ECS", - "Kubernetes", - "Linux", - "Windows", - "Configuration", - "Enabling your Apache server", - "Configure the integration", - "Important", - "Apache Instance Settings", - "Labels/Custom attributes", - "Example configurations", - "BASIC CONFIGURATION", - "HTTP BASIC AUTHENTICATION", - "CUSTOM APACHE BINARY LOCATION", - "METRICS ONLY WITH SELF-SIGNED CERTIFICATE", - "METRICS ONLY WITH ALTERNATIVE CERTIFICATE", - "ENVIRONMENT VARIABLES REPLACEMENT", - "MULTI-INSTANCE MONITORING", - "Find and use data", - "Metric data", - "Inventory data", - "System metadata", - "Troubleshooting", - "Problem accessing HTTPS endpoint for Apache", - "Check the source code" - ], - "title": "Apache monitoring integration", - "type": "docs", - "tags": [ - "On-host integrations list", - "On-host integrations", - "Integrations" - ], - "external_id": "cae1fcc5a402bf71ae7d304b00420a9aa9b1152d", - "image": "https://docs.newrelic.com/static/6bf45ccf002250f7ebaa69cbe3ff706c/c1b63/guided-install-cli.png", - "url": "https://docs.newrelic.com/docs/infrastructure/host-integrations/host-integrations-list/apache-monitoring-integration/", - "published_at": "2022-02-14T10:11:28Z", - "updated_at": "2022-02-14T10:11:28Z", - "document_type": "page", - "popularity": 1, - "body": "Our Apache integration sends performance metrics and inventory data from your Apache web server to the New Relic platform. You can view pre-built dashboards of your Apache metric data, create alert policies, and create your own custom queries and charts. The integration works by gathering data from Apache's status module, so that module must be enabled and configured for your Apache instance (more details in Requirements). Read on to install the integration, and to see what data we collect. Compatibility and requirements Our integration is compatible with Apache versions 2.2 or 2.4. Before installing the integration, make sure that you meet the following requirements: A New Relic account. Don't have one? Sign up for free! No credit card required. Apache status module enabled and configured for Apache instance. Apache status module endpoint (default server-status) available from the host containing the Apache integration. If Apache is not running on Kubernetes or Amazon ECS, you must have the infrastructure agent installed on a Linux OS host that's running Apache. Otherwise: If running on Kubernetes, see these requirements. If running on ECS, see these requirements. Quick start Instrument your Apache web server quickly and send your telemetry data with guided install. Our guided install creates a customized CLI command for your environment that downloads and installs the New Relic CLI and the infrastructure agent. Ready to get started? 
Click one of these buttons to try it out. Guided install EU Guided install Our guided install uses the infrastructure agent to set up the Apache integration. Not only that, it discovers other applications and log sources running in your environment and then recommends which ones you should instrument. The guided install works with most setups. But if it doesn't suit your needs, you can find other methods below to get started monitoring your Apache web server. Install and activate To install the Apache integration, follow the instructions for your environment: ECS See Monitor service running on ECS. Kubernetes See Monitor service running on Kubernetes. Linux Follow the instructions for installing an integration, using the file name nri-apache. Change directory to the integration's folder: cd /etc/newrelic-infra/integrations.d Copy Create a copy of the sample configuration file: sudo cp apache-config.yml.sample apache-config.yml Copy Edit the apache-config.yml file as described in the configuration settings. Restart the infrastructure agent. Windows Download the nri-apache .MSI installer image from: http://download.newrelic.com/infrastructure_agent/windows/integrations/nri-apache/nri-apache-amd64.msi To install from the Windows command prompt, run: msiexec.exe /qn /i PATH\\TO\\nri-apache-amd64.msi Copy In the Integrations directory, C:\\Program Files\\New Relic\\newrelic-infra\\integrations.d\\, create a copy of the sample configuration file by running: cp apache-config.yml.sample apache-config.yml Copy Edit the apache-config.yml configuration file using the configuration settings. Restart the infrastructure agent. Additional notes: Advanced: Integrations are also available in tarball format to allow for install outside of a package manager. On-host integrations do not automatically update. For best results, regularly update the integration package and the infrastructure agent. Configuration Enabling your Apache server To capture data from the Apache integration, you must first enable and configure the status module: Ensure the Apache status module is enabled and configured for your Apache instance. Ensure the Apache status module endpoint (default server-status) is available from the host containing the Apache integration. Configure the integration There are several ways to configure the integration, depending on how it was installed: If enabled via Kubernetes: see Monitor services running on Kubernetes. If enabled via Amazon ECS: see Monitor services running on ECS. If installed on-host: edit the config in the integration's YAML config file, apache-config.yml. An integration's YAML-format configuration is where you can place required login credentials and configure how data is collected. Which options you change depend on your setup and preference. The configuration file has common settings applicable to all integrations, such as interval, timeout, inventory_source. To read all about these common settings, refer to our Configuration Format document. Important If you are still using our legacy configuration/definition files, please refer to this document for help. Specific settings related to Apache are defined using the env section of the configuration file. These settings control the connection to your Apache instance as well as other security settings and features. The list of valid settings is described in the next section of this document. Apache Instance Settings The Apache integration collects both metrics(M) and inventory(I) information. 
In the table, use the Applies To column for the settings available to each collection: Setting Description Default Applies to STATUS_URL The URL set up to provide the metrics using the status module. http://127.0.0.1/server-status?auto M/I BINARY_PATH Set location of the apache binary file. N/A I CA_BUNDLE_FILE Alternative Certificate Authority bundle file. N/A M CA_BUNDLE_DIR Alternative Certificate Authority bundle directory. N/A M VALIDATE_CERTS Set to false if the status URL is HTTPS with a self-signed certificate. true M REMOTE_MONITORING Enable multi-tenancy monitoring. true M/I METRICS Set to true to enable metrics-only collection. false INVENTORY Set to true to enable inventory-only collection. false The values for these settings can be defined in several ways: Adding the value directly in the config file. This is the most common way. Replacing the values from environment variables using the {{}} notation. This requires infrastructure agent v1.14.0+. Read more here or see the example below. Using secrets management. Use this to protect sensitive information, such as passwords that would be exposed in plain text on the configuration file. For more information, see Secrets management. Labels/Custom attributes You can further decorate your metrics using labels. Labels allow you to add key/value pair attributes to your metrics, which you can then use to query, filter, or group your metrics on. Our default sample config file includes examples of labels; however, as they are not mandatory, you can remove, modify, or add new ones of your choice. labels: env: production role: load_balancer Copy Example configurations BASIC CONFIGURATION This is the very basic configuration to collect metrics and inventory from your localhost: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache Copy HTTP BASIC AUTHENTICATION This configuration collects metrics and inventory from your localhost protected with basic authentication. Replace the username and password on the STATUS_URL with your credentials: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://username:password@127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: http://username:password@127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache Copy CUSTOM APACHE BINARY LOCATION By default the integration tries to detect the Apache binary on these 2 different locations: /usr/sbin/httpd and /usr/sbin/apache2ctl. 
If your Apache is installed on a custom location or you are using MS Windows, use the BINARY_PATH setting to specify the correct location and filename of your Apache binary: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto BINARY_PATH: 'C:\\Apache\\bin\\httpd.exe' REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache Copy METRICS ONLY WITH SELF-SIGNED CERTIFICATE In this configuration we only have one integration block with METRICS: true to collect only metrics and added VALIDATE_CERTS: false to prevent validation of the server's SSL certificate when using a self-signed one: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: https://my_apache_host/server-status?auto VALIDATE_CERTS: false REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy METRICS ONLY WITH ALTERNATIVE CERTIFICATE In this configuration we only have one integration block with METRICS: true to collect only metrics and added CA_BUNDLE_FILE pointing to an alternative certificate file: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: https://my_apache_host/server-status?auto CA_BUNDLE_FILE='/etc/ssl/certs/custom-ca.crt' REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy ENVIRONMENT VARIABLES REPLACEMENT In this configuration we are using the environment variable APACHE_STATUS to populate the STATUS_URL setting of the integration: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: {{APACHE_STATUS}} REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy MULTI-INSTANCE MONITORING In this configuration we are monitoring multiple Apache servers from the same integration. For the first instance (STATUS_URL: https://1st_apache_host/server-status?auto) we are collecting metrics and inventory while for the second instance (STATUS_URL: https://2nd_apache_host/server-status?auto) we will only collect metrics. integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: https://1st_apache_host/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: https://1st_apache_host/server-status?auto REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://2nd_apache_host/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy Find and use data Data from this service is reported to an integration dashboard. Apache data is attached to the ApacheSample event type. You can query this data for troubleshooting purposes or to create charts and dashboards. For more on how to find and use your data, see Understand integration data. Metric data The Apache integration collects the following metric data attributes. Each metric name is prefixed with a category indicator and a period, such as net. or server.. Name Description net.bytesPerSecond Rate of the number of bytes served, in bytes per second. net.requestsPerSecond Rate of the number of client requests, in requests per second. server.busyWorkers Current number of busy workers. 
server.idleWorkers Current number of idle workers. server.scoreboard.closingWorkers Current number of workers closing TCP connection after serving the response. server.scoreboard.dnsLookupWorkers Current number of workers performing a DNS lookup. server.scoreboard.finishingWorkers Current number of workers gracefully finishing. server.scoreboard.idleCleanupWorkers Current number of idle workers ready for cleanup. server.scoreboard.keepAliveWorkers Current number of workers maintaining a keep-alive connection. server.scoreboard.loggingWorkers Current number of workers that are logging. server.scoreboard.readingWorkers Current number of workers reading requests (headers or body). server.scoreboard.startingWorkers Current number of workers that are starting up. server.scoreboard.totalWorkers Total number of workers available. Workers that are not needed to process requests may not be started. server.scoreboard.writingWorkers Current number of workers that are writing. Inventory data Inventory data captures the version numbers from running Apache and from all loaded Apache modules, and adds those version numbers under the config/apache namespace. For more about inventory data, see Understand data. System metadata Besides the standard attributes collected by the infrastructure agent, the integration collects inventory data associated with the ApacheSample event type: Name Description software.version The version of the Apache server. Example: Apache/2.4.7 (Ubuntu). Troubleshooting Problem accessing HTTPS endpoint for Apache If you are having issues accessing the HTTPS endpoint for Apache, here are two possible solutions: Although you cannot ignore the SSL certification, you can set the config file parameters ca_bundle_file and ca_bundle_dir to point to an unsigned certificate in the Apache config file. Example: instances: - name: apache-server-metrics command: metrics arguments: status_url: http://127.0.0.1/server-status?auto ca_bundle_file: /etc/newrelic-infra/integrations.d/ssl/b2b.ca-bundle Copy An example using ca_bundle_dir: ca_bundle_dir: /etc/newrelic-infra/integrations.d/ssl Copy Alternatively, you can use HTTP instead of HTTPS. Check the source code This integration is open source software. That means you can browse its source code and send improvements, or create your own fork and build it.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 122.20282, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "sections": "Enabling your Apache server", - "body": " Instrument your Apache web server quickly and send your telemetry data with guided install. Our guided install creates a customized CLI command for your environment that downloads and installs the New Relic CLI and the infrastructure agent. Ready to get started? 
Click one of these buttons to try it out" - }, - "id": "6174ae5a64441f5baf5fc976" - }, { "sections": [ "JVMs page (Java): View app server metrics from JMX", @@ -56831,7 +56722,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.736374, + "_score": 116.5437, "_version": null, "_explanation": null, "sort": null, @@ -56843,6 +56734,66 @@ }, "id": "617e6166196a6781dff7e2f9" }, + { + "sections": [ + "Apache monitoring integration", + "Compatibility and requirements", + "Quick start", + "Install and activate", + "ECS", + "Kubernetes", + "Linux", + "Windows", + "Configuration", + "Enabling your Apache server", + "Configure the integration", + "Important", + "Apache Instance Settings", + "Labels/Custom attributes", + "Example configurations", + "BASIC CONFIGURATION", + "HTTP BASIC AUTHENTICATION", + "CUSTOM APACHE BINARY LOCATION", + "METRICS ONLY WITH SELF-SIGNED CERTIFICATE", + "METRICS ONLY WITH ALTERNATIVE CERTIFICATE", + "ENVIRONMENT VARIABLES REPLACEMENT", + "MULTI-INSTANCE MONITORING", + "Find and use data", + "Metric data", + "Inventory data", + "System metadata", + "Troubleshooting", + "Problem accessing HTTPS endpoint for Apache", + "Check the source code" + ], + "title": "Apache monitoring integration", + "type": "docs", + "tags": [ + "On-host integrations list", + "On-host integrations", + "Integrations" + ], + "external_id": "cae1fcc5a402bf71ae7d304b00420a9aa9b1152d", + "image": "https://docs.newrelic.com/static/6bf45ccf002250f7ebaa69cbe3ff706c/c1b63/guided-install-cli.png", + "url": "https://docs.newrelic.com/docs/infrastructure/host-integrations/host-integrations-list/apache-monitoring-integration/", + "published_at": "2022-02-14T10:11:28Z", + "updated_at": "2022-02-14T10:11:28Z", + "document_type": "page", + "popularity": 1, + "body": "Our Apache integration sends performance metrics and inventory data from your Apache web server to the New Relic platform. You can view pre-built dashboards of your Apache metric data, create alert policies, and create your own custom queries and charts. The integration works by gathering data from Apache's status module, so that module must be enabled and configured for your Apache instance (more details in Requirements). Read on to install the integration, and to see what data we collect. Compatibility and requirements Our integration is compatible with Apache versions 2.2 or 2.4. Before installing the integration, make sure that you meet the following requirements: A New Relic account. Don't have one? Sign up for free! No credit card required. Apache status module enabled and configured for Apache instance. Apache status module endpoint (default server-status) available from the host containing the Apache integration. If Apache is not running on Kubernetes or Amazon ECS, you must have the infrastructure agent installed on a Linux OS host that's running Apache. Otherwise: If running on Kubernetes, see these requirements. If running on ECS, see these requirements. Quick start Instrument your Apache web server quickly and send your telemetry data with guided install. Our guided install creates a customized CLI command for your environment that downloads and installs the New Relic CLI and the infrastructure agent. Ready to get started? Click one of these buttons to try it out. Guided install EU Guided install Our guided install uses the infrastructure agent to set up the Apache integration. 
Not only that, it discovers other applications and log sources running in your environment and then recommends which ones you should instrument. The guided install works with most setups. But if it doesn't suit your needs, you can find other methods below to get started monitoring your Apache web server. Install and activate To install the Apache integration, follow the instructions for your environment: ECS See Monitor service running on ECS. Kubernetes See Monitor service running on Kubernetes. Linux Follow the instructions for installing an integration, using the file name nri-apache. Change directory to the integration's folder: cd /etc/newrelic-infra/integrations.d Copy Create a copy of the sample configuration file: sudo cp apache-config.yml.sample apache-config.yml Copy Edit the apache-config.yml file as described in the configuration settings. Restart the infrastructure agent. Windows Download the nri-apache .MSI installer image from: http://download.newrelic.com/infrastructure_agent/windows/integrations/nri-apache/nri-apache-amd64.msi To install from the Windows command prompt, run: msiexec.exe /qn /i PATH\\TO\\nri-apache-amd64.msi Copy In the Integrations directory, C:\\Program Files\\New Relic\\newrelic-infra\\integrations.d\\, create a copy of the sample configuration file by running: cp apache-config.yml.sample apache-config.yml Copy Edit the apache-config.yml configuration file using the configuration settings. Restart the infrastructure agent. Additional notes: Advanced: Integrations are also available in tarball format to allow for install outside of a package manager. On-host integrations do not automatically update. For best results, regularly update the integration package and the infrastructure agent. Configuration Enabling your Apache server To capture data from the Apache integration, you must first enable and configure the status module: Ensure the Apache status module is enabled and configured for your Apache instance. Ensure the Apache status module endpoint (default server-status) is available from the host containing the Apache integration. Configure the integration There are several ways to configure the integration, depending on how it was installed: If enabled via Kubernetes: see Monitor services running on Kubernetes. If enabled via Amazon ECS: see Monitor services running on ECS. If installed on-host: edit the config in the integration's YAML config file, apache-config.yml. An integration's YAML-format configuration is where you can place required login credentials and configure how data is collected. Which options you change depend on your setup and preference. The configuration file has common settings applicable to all integrations, such as interval, timeout, inventory_source. To read all about these common settings, refer to our Configuration Format document. Important If you are still using our legacy configuration/definition files, please refer to this document for help. Specific settings related to Apache are defined using the env section of the configuration file. These settings control the connection to your Apache instance as well as other security settings and features. The list of valid settings is described in the next section of this document. Apache Instance Settings The Apache integration collects both metrics(M) and inventory(I) information. In the table, use the Applies To column for the settings available to each collection: Setting Description Default Applies to STATUS_URL The URL set up to provide the metrics using the status module. 
http://127.0.0.1/server-status?auto M/I BINARY_PATH Set location of the apache binary file. N/A I CA_BUNDLE_FILE Alternative Certificate Authority bundle file. N/A M CA_BUNDLE_DIR Alternative Certificate Authority bundle directory. N/A M VALIDATE_CERTS Set to false if the status URL is HTTPS with a self-signed certificate. true M REMOTE_MONITORING Enable multi-tenancy monitoring. true M/I METRICS Set to true to enable metrics-only collection. false INVENTORY Set to true to enable inventory-only collection. false The values for these settings can be defined in several ways: Adding the value directly in the config file. This is the most common way. Replacing the values from environment variables using the {{}} notation. This requires infrastructure agent v1.14.0+. Read more here or see the example below. Using secrets management. Use this to protect sensitive information, such as passwords that would be exposed in plain text on the configuration file. For more information, see Secrets management. Labels/Custom attributes You can further decorate your metrics using labels. Labels allow you to add key/value pair attributes to your metrics, which you can then use to query, filter, or group your metrics on. Our default sample config file includes examples of labels; however, as they are not mandatory, you can remove, modify, or add new ones of your choice. labels: env: production role: load_balancer Copy Example configurations BASIC CONFIGURATION This is the very basic configuration to collect metrics and inventory from your localhost: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache Copy HTTP BASIC AUTHENTICATION This configuration collects metrics and inventory from your localhost protected with basic authentication. Replace the username and password on the STATUS_URL with your credentials: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://username:password@127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: http://username:password@127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache Copy CUSTOM APACHE BINARY LOCATION By default the integration tries to detect the Apache binary on these 2 different locations: /usr/sbin/httpd and /usr/sbin/apache2ctl. 
If your Apache is installed on a custom location or you are using MS Windows, use the BINARY_PATH setting to specify the correct location and filename of your Apache binary: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: http://127.0.0.1/server-status?auto BINARY_PATH: 'C:\\Apache\\bin\\httpd.exe' REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache Copy METRICS ONLY WITH SELF-SIGNED CERTIFICATE In this configuration we only have one integration block with METRICS: true to collect only metrics and added VALIDATE_CERTS: false to prevent validation of the server's SSL certificate when using a self-signed one: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: https://my_apache_host/server-status?auto VALIDATE_CERTS: false REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy METRICS ONLY WITH ALTERNATIVE CERTIFICATE In this configuration we only have one integration block with METRICS: true to collect only metrics and added CA_BUNDLE_FILE pointing to an alternative certificate file: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: https://my_apache_host/server-status?auto CA_BUNDLE_FILE='/etc/ssl/certs/custom-ca.crt' REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy ENVIRONMENT VARIABLES REPLACEMENT In this configuration we are using the environment variable APACHE_STATUS to populate the STATUS_URL setting of the integration: integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: {{APACHE_STATUS}} REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy MULTI-INSTANCE MONITORING In this configuration we are monitoring multiple Apache servers from the same integration. For the first instance (STATUS_URL: https://1st_apache_host/server-status?auto) we are collecting metrics and inventory while for the second instance (STATUS_URL: https://2nd_apache_host/server-status?auto) we will only collect metrics. integrations: - name: nri-apache env: METRICS: \"true\" STATUS_URL: https://1st_apache_host/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer - name: nri-apache env: INVENTORY: \"true\" STATUS_URL: https://1st_apache_host/server-status?auto REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/apache - name: nri-apache env: METRICS: \"true\" STATUS_URL: http://2nd_apache_host/server-status?auto REMOTE_MONITORING: true interval: 15s labels: env: production role: load_balancer Copy Find and use data Data from this service is reported to an integration dashboard. Apache data is attached to the ApacheSample event type. You can query this data for troubleshooting purposes or to create charts and dashboards. For more on how to find and use your data, see Understand integration data. Metric data The Apache integration collects the following metric data attributes. Each metric name is prefixed with a category indicator and a period, such as net. or server.. Name Description net.bytesPerSecond Rate of the number of bytes served, in bytes per second. net.requestsPerSecond Rate of the number of client requests, in requests per second. server.busyWorkers Current number of busy workers. 
server.idleWorkers Current number of idle workers. server.scoreboard.closingWorkers Current number of workers closing TCP connection after serving the response. server.scoreboard.dnsLookupWorkers Current number of workers performing a DNS lookup. server.scoreboard.finishingWorkers Current number of workers gracefully finishing. server.scoreboard.idleCleanupWorkers Current number of idle workers ready for cleanup. server.scoreboard.keepAliveWorkers Current number of workers maintaining a keep-alive connection. server.scoreboard.loggingWorkers Current number of workers that are logging. server.scoreboard.readingWorkers Current number of workers reading requests (headers or body). server.scoreboard.startingWorkers Current number of workers that are starting up. server.scoreboard.totalWorkers Total number of workers available. Workers that are not needed to process requests may not be started. server.scoreboard.writingWorkers Current number of workers that are writing. Inventory data Inventory data captures the version numbers from running Apache and from all loaded Apache modules, and adds those version numbers under the config/apache namespace. For more about inventory data, see Understand data. System metadata Besides the standard attributes collected by the infrastructure agent, the integration collects inventory data associated with the ApacheSample event type: Name Description software.version The version of the Apache server. Example: Apache/2.4.7 (Ubuntu). Troubleshooting Problem accessing HTTPS endpoint for Apache If you are having issues accessing the HTTPS endpoint for Apache, here are two possible solutions: Although you cannot ignore the SSL certification, you can set the config file parameters ca_bundle_file and ca_bundle_dir to point to an unsigned certificate in the Apache config file. Example: instances: - name: apache-server-metrics command: metrics arguments: status_url: http://127.0.0.1/server-status?auto ca_bundle_file: /etc/newrelic-infra/integrations.d/ssl/b2b.ca-bundle Copy An example using ca_bundle_dir: ca_bundle_dir: /etc/newrelic-infra/integrations.d/ssl Copy Alternatively, you can use HTTP instead of HTTPS. Check the source code This integration is open source software. That means you can browse its source code and send improvements, or create your own fork and build it.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 115.366356, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Enabling your Apache server", + "body": " Instrument your Apache web server quickly and send your telemetry data with guided install. Our guided install creates a customized CLI command for your environment that downloads and installs the New Relic CLI and the infrastructure agent. Ready to get started? 
Click one of these buttons to try it out" + }, + "id": "6174ae5a64441f5baf5fc976" + }, { "sections": [ "NGINX monitoring integration", @@ -56882,7 +56833,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 113.3757, + "_score": 106.742325, "_version": null, "_explanation": null, "sort": null, @@ -56922,7 +56873,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 104.540985, + "_score": 97.1904, "_version": null, "_explanation": null, "sort": null, @@ -56971,7 +56922,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 102.19244, + "_score": 95.60009, "_version": null, "_explanation": null, "sort": null, @@ -57025,7 +56976,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 279.59338, + "_score": 259.9965, "_version": null, "_explanation": null, "sort": null, @@ -57037,54 +56988,6 @@ }, "id": "617d5841196a67bb40f7c1de" }, - { - "sections": [ - "Link your applications to Kubernetes", - "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" - ], - "title": "Link your applications to Kubernetes", - "type": "docs", - "tags": [ - "Link apps and services", - "Kubernetes integration", - "Integrations" - ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", - "document_type": "page", - "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. 
To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc.). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled. 
Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates, you need to disable the automatic installation of certificates when installing using Helm. To disable certificate installation, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certificate Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following (note that the server key is converted with openssl rsa, since the x509 subcommand only handles certificates): openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl rsa -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help.
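Before creating the secret, it can be worth sanity-checking the converted files (an optional check, not part of the official procedure; the filenames are the placeholders used above): openssl x509 -in CERTIFICATE_FILENAME.pem -noout -subject -dates openssl rsa -in SERVER_KEY_FILENAME.pem -noout -check Copy Both commands should succeed and print details without prompting for a passphrase.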
Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to re-run the job using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA used to patch the mutatingwebhookconfiguration resource is not the one used to sign the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step.
If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries in the logs, it means that the apiserver is unable to communicate with the webhook service; this could be due to networking rules or security groups rejecting the communication. To check whether the apiserver is unable to communicate with the webhook, inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster; this will make the apiserver try to communicate with the webhook. The following command creates a busybox. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors.
grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 252.9624, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Link your applications to Kubernetes", - "sections": "Link your applications to Kubernetes", - "tags": "Kubernetes integration", - "body": " the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips" - }, - "id": "617daead28ccbc662b7ffe23" - }, { "sections": [ "Link your applications to Kubernetes", @@ -57127,7 +57030,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 249.56396, + "_score": 244.69106, "_version": null, "_explanation": null, "sort": null, @@ -57139,57 +57042,6 @@ }, "id": "61fd3c9d196a675ff3e80980" }, - { - "sections": [ - "Kubernetes integration: what's changed in v3", - "v3 BETA", - "Integration version and Chart version", - "Overview", - "Architectural changes", - "Kube-state-metrics component", - "Kubelet component", - "Control plane component", - "Helm Charts", - "Migration Guide", - "KSM configuration", - "Tip", - "Control plane configuration", - "Agent configuration", - "Integrations configuration", - "Miscellaneous chart values", - "Upgrade from v2", - "Important" - ], - "title": "Kubernetes integration: what's changed in v3", - "type": "docs", - "tags": [ - "Changelog", - "Kubernetes integration", - "Integrations" - ], - "external_id": "a78ca20934f98fd2b43c7a9fbc2453c802c24ab8", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/changes-since-v3/", - "published_at": "2022-02-15T19:13:49Z", - "updated_at": "2022-02-13T15:01:38Z", - "document_type": "page", - "popularity": 1, - "body": "v3 BETA Kubernetes integration v3 is currently in a late stage beta, and we expect to make it generally available during the second quarter of 2022. 
We encourage you to give it a try and let us know what you think! Integration version and Chart version The Kubernetes integration v3 (appVersion) is included in the nri-bundle chart version 4. Overview v3 BETA Data reported by the Kubernetes Integration version 3 has not changed with respect to version 2. For this major release, we focused on configurability, stability, and user experience. From version 3 onwards, New Relic's Kubernetes solution features a new architecture which aims to be more modular and configurable, giving you more power to choose how the solution is deployed and making it compatible with more environments. Architectural changes In this new version, the main component of the integration, the newrelic-infrastructure DaemonSet, is divided into three different components: nrk8s-ksm, nrk8s-kubelet, and nrk8s-controlplane, with the first being a deployment and the next two being DaemonSets. This makes it easier to make decisions at scheduling and deployment time, rather than runtime. Moreover, we also changed the lifecycle of the scraping process. We went from a one-shot, short-lived process to a long-lived one, allowing it to leverage higher-level Kubernetes APIs like the Kubernetes informers, which provide built-in caching and watching of cluster objects. For this reason, each of the components has two containers: A container for the integration, responsible for collecting metrics. A container with the New Relic Infrastructure Agent, which is used to send the metrics to the New Relic Platform. Kube-state-metrics component We build our cluster state metrics on top of the OSS project kube-state-metrics, which is housed under the Kubernetes organization itself. Previously, as our solution comprised just one DaemonSet, an election process took place to decide which pod was going to be in charge of scraping the metrics. This process was based merely on locality. The pod in charge would be the one that shares a node with the KSM deployment. As the KSM output contains data for the whole cluster, parsing this output requires a substantial amount of resources. While this is something that big cluster operators can accommodate, the fact that one arbitrary instance of the DaemonSet is the one that needs this large amount of resources forces cluster operators to allow such consumption for the whole DaemonSet, when only one pod actually needed it. Another problem with KSM scraping was figuring out in which node the KSM pod lived. To do this, we needed to contact the API Server and filter pods by some labels, but given the short-lived nature of the integration, caches and watchers were not being used effectively by it. This meant that, on large clusters, all instances of the DaemonSet flooded the control plane with non-namespaced pod list requests in an attempt to figure out whether the KSM pod was living next to them. We decided to tackle this problem by making two big changes to how KSM is scraped: Split the responsibility of scraping KSM out of the DaemonSet pods to a separate, single-instance Deployment. Refactor the code and make it long-running, so we can leverage Kubernetes informers which provide built-in caching and watching mechanisms. Thus, a specific Deployment nrk8s-ksm now takes care of finding KSM and scraping it. With this pod now being long-lived and single, it can safely use an endpoints informer to locate the IP of the KSM pod and scrape it.
The informer will automatically cache the list of endpoints in the cluster locally and watch for new ones, avoiding storming the API Server with requests to figure out where the pod is located. While a sharded KSM setup is not supported yet, this new code was built with this future improvement in mind. Kubelet component The Kubelet is the “Kubernetes agent”, a service that runs on every Kubernetes node and is responsible for creating the containers as instructed by the control plane. Since it's the Kubelet that partners closely with the Container Runtime, it's the main source of infrastructure metrics for our integration, such as use of CPU, memory, disk, network, etc. Although not thoroughly documented, the Kubelet API is the de-facto standard source for most Kubernetes metrics. Scraping the Kubelet is typically a low-resource operation. Given this, and our intent to minimize internode traffic whenever possible, nrk8s-kubelet is run as a DaemonSet where each instance gathers metrics from the Kubelet running on the same node. nrk8s-kubelet no longer requires hostNetwork to run properly, and instead it connects to the Kubelet using the Node IP. If this process fails, nrk8s-kubelet will fall back to reaching the node through the API Server proxy. This fallback mechanism is not new, but we do encourage you to mind this if you have very large clusters, as proxying many kubelets might increase the load on the API server. You can check if the API Server is being used as a proxy by looking for a message like this in the logs: Trying to connect to kubelet through API proxy Copy Control plane component Enabling the integration to successfully find and connect to CP components was probably one of the hardest parts of this effort. The main reason for this is the number of ways in which CP components can be configured: inside or outside the cluster, with one or many replicas, with or without dedicated nodes, etc. Moreover, different CP components might be configured differently. We built the current approach with the following scenarios in mind: CP monitoring should work out of the box for those environments in which the CP is reachable out of the box, e.g. Kubeadm or even Minikube. For setups where the CP cannot be autodiscovered, for example if it lives outside the cluster, we should provide a way for the user to specify their own endpoints. Failure to autodiscover shouldn't cause the deployment to fail, but failure to hit a manually defined endpoint should. As major Kubernetes distributions such as Kubeadm deploy CP components configured to listen only on localhost on the host's network namespace, we chose to deploy nrk8s-controlplane as a DaemonSet with hostNetwork: true. We structured the configuration to support autodiscovery and static endpoints. To be compatible with a wide range of distributions out of the box, we provide a comprehensive set of known defaults as configuration entries. Doing this in the configuration instead of the code allows you to tweak autodiscovery to your needs. Another improvement was adding the possibility of having multiple endpoints per selector and adding a probe mechanism which automatically detects the correct one. This allows you to try different configurations such as ports or protocols by using the same selector.
Scraping configuration for the etcd CP component looks like the following; the same structure and features apply to all components: config: etcd: enabled: true autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy If staticEndpoint is set, the component will try to scrape it. If it can't hit the endpoint, the integration will fail so there are no silent errors when manual endpoints are configured. If staticEndpoint is not set, the component will iterate over the autodiscover entries looking for the first pod that matches the selector in the specified namespace, and, optionally, is running on the same node as the DaemonSet (if matchNode is set to true). After a pod is discovered, the component probes the listed endpoints in order by issuing an HTTP HEAD request, and scrapes the first successfully probed one using the selected authorization type. While above we show a config excerpt for the etcd component, the scraping logic is the same for other components. For more detailed instructions on how to configure control plane monitoring, please check the control plane monitoring page. Helm Charts Helm is the primary means we offer to deploy our solution into your clusters. Chart complexity was also significantly increased from the previous version, where it only had to manage one DaemonSet. Now, it has to manage one Deployment and two DaemonSets, each with slightly different configurations. This will give you more flexibility to adapt the solution to your needs, without the need to apply manual patches on top of the chart and the generated manifests. Some of the new features that our new Helm chart exposes are: Full control of the securityContext for all pods Full control of pod labels and annotations for all pods Ability to add extra environment variables, volumes, and volumeMounts Full control over the integration configuration, including which endpoints are reached, autodiscovery behavior, and scraping intervals Better alignment with Helm idioms and standards You can check full details on all the switches that can be flipped in the Chart's README.md. Migration Guide In order to make migration from earlier versions as easy as possible, we developed a compatibility layer that will translate most of the options that could be specified in the old newrelic-infrastructure chart to their new counterparts. This compatibility layer is temporary and will be removed in the future, so we encourage you to read this guide carefully and migrate the configuration with human supervision. KSM configuration Tip KSM monitoring works out of the box for most configurations; most users will not need to change this config. disableKubeStateMetrics has been replaced by ksm.enabled. The default is still the same (KSM scraping enabled). kubeStateMetricsScheme, kubeStateMetricsPort, kubeStateMetricsUrl, kubeStateMetricsPodLabel, and kubeStateMetricsNamespace have been replaced by the more comprehensive and flexible ksm.config. The ksm.config object has the following structure: ksm: config: # When autodiscovering KSM, force the following scheme. By default, `http` is used. scheme: \"http\" # Label selector to find kube-state-metrics endpoints. Defaults to `app.kubernetes.io/name=kube-state-metrics`.
selector: \"app.kubernetes.io/name=kube-state-metrics\" # Restrict KSM discovery to this particular namespace. Defaults to all namespaces. namespace: \"\" # When autodiscovering, only consider endpoints that use this port. By default, all ports from the discovered `endpoint` are probed. #port: 8080 # Override autodiscovery mechanism completely and specify the KSM url directly instead #staticUrl: \"http://test.io:8080/metrics\" Copy Control plane configuration Control plane configuration has changed substantially. If you previously had control plane monitoring enabled, we encourage you to take a look at the Configure control plane monitoring dedicated page. The following options have been replaced by more comprehensive configuration, covered in the section linked above: apiServerSecurePort etcdTlsSecretName etcdTlsSecretNamespace controllerManagerEndpointUrl, etcdEndpointUrl, apiServerEndpointUrl, and schedulerEndpointUrl Agent configuration Agent config file, previously specified in config has been moved to common.agentConfig. Format of the file has not changed, and the full range of options that can be configured can be found here. The following agent options were previously \"aliased\" in the root of the values.yml file, and are no longer available: logFile has been replaced by common.agentConfig.log_file. eventQueueDepth has been replaced by common.agentConfig.event_queue_depth. customAttributes has changed in format to a yaml object. The previous format, a manually json-encoded string e.g. {\"team\": \"devops\"}, is still accepted although discouraged. Previously, customAttributes had a default clusterName entry which might have unwanted consequences if removed. This is no longer the case, users may now safely override customAttributes on its entirety. discoveryCacheTTL has been completely removed, as the discovery is now performed using kubernetes informers which have a built-in cache. Integrations configuration Integrations were previously configured under integrations_config, using an array format: integrations_config: - name: nri-redis.yaml data: discovery: # ... integrations: # ... Copy The mechanism remains the same, but we have changed the format to be more user-friendly: integrations: nri-redis-sampleapp: discovery: # ... integrations: # ... Copy Moreover, now the --port and --tls flags are mandatory on the discovery command. In the past, the following would work: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes Copy From v3 onwards, you must specify --port and --tls: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes --tls --port 10250 Copy This change is required because in v2 and below, the nrk8s-kubelet component (or its equivalent) ran with hostNetwork: true, so nri-discovery-kubernetes could connect to the kubelet using localhost and plain http. For security reasons, this is no longer the case, hence the need to specify both flags from now on. For more details on how to configure on-host integrations in Kubernetes please check the Monitor services in Kubernetes page. Miscellaneous chart values While not related to the integration configuration, the following miscellaneous options for the helm chart have also changed: runAsUser has been replaced by securityContext, which is templated directly into the pods and more configurable. resources has been removed, as now we deploy three different workloads. 
Resources for each one can be configured individually under: ksm.resources kubelet.resources controlPlane.resources Similarly, tolerations has been split into three and the previous one is no longer valid: ksm.tolerations kubelet.tolerations controlPlane.tolerations All three default to tolerate any value for NoSchedule and NoExecute image and all its subkeys have been replaced by individual sections for each of the three images that are now deployed: images.forwarder.* to configure the infrastructure-agent forwarder. images.agent.* to configure the image bundling the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values-newrelic.yaml file with your desired License Key and configuration. If you had previously installed our chart from the CLI directly, for example using a command like the following: bash Copy $ helm install newrelic/nri-bundle \\ > --set global.licenseKey= \\ > --set global.cluster= \\ > --set infrastructure.enabled=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set ksm.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true You can take the provided --set arguments and put them in a yaml file like the following: # values-newrelic.yaml global: licenseKey: cluster: infrastructure: enabled: true prometheus: enabled: true webhook: enabled: true ksm: enabled: true kubeEvents: enabled: true logging: enabled: true Copy After doing this, and adapting any other setting you might have changed according to the section above, you can upgrade by running the following command: bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel The --devel flag will instruct helm to download the v3 version of the integration (version 4.x of the nri-bundle chart). Important The --reuse-values flag is not supported for upgrading from v2 to v3.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 229.22168, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Kubernetes integration: what's changed in v3", - "sections": "Kubernetes integration: what's changed in v3", - "tags": "Kubernetes integration", - "body": " the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values" - }, - "id": "61fd3c9d28ccbc72eec0dcda" - }, { "sections": [ "New Relic Metrics Adapter", @@ -57223,7 +57075,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 227.97119, + "_score": 223.34859, "_version": null, "_explanation": null, "sort": null, @@ -57232,6 +57084,104 @@ "body": " in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. 
Upgrade" }, "id": "61fd193d196a672daae826d6" + }, + { + "sections": [ + "Kubernetes integration: Predefined alert policy", + "Predefined alert conditions", + "Container CPU usage % is too high", + "Container memory usage % is too high", + "Pod was unable to be scheduled", + "Pod is not ready", + "Container is running out of space", + "ReplicaSet doesn't have desired amount of pods", + "etcd open file descriptors", + "Create new alert conditions" + ], + "title": "Kubernetes integration: Predefined alert policy", + "type": "docs", + "tags": [ + "Installation", + "Kubernetes integration", + "Integrations" + ], + "external_id": "7c92831c394c4c087bad8b481250e55557e4b794", + "image": "", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/kubernetes-integration-predefined-alert-policy/", + "published_at": "2022-02-15T19:17:51Z", + "updated_at": "2022-02-04T14:47:09Z", + "document_type": "page", + "popularity": 1, + "body": "When deploying the New Relic Kubernetes integration for the first time in an account, we deploy a default set of alert conditions to your account. The predefined alert policy, named Kubernetes default alert policy, doesn't have a notification channel by default to avoid unwanted notifications. The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT value (cpuUsedCores/cpuLimitCores)*100 Warning threshold > 90% for at least 5 minutes Critical threshold > 95% for at least 5 mins Container memory usage % is too high Setting Value Event type K8sContainerSample SELECT value memoryWorkingSetUtilization Warning threshold > 85% for at least 5 minutes Critical threshold > 95% for at least 5 mins Pod was unable to be scheduled Setting Value Event type K8sPodSample SELECT value isScheduled Warning threshold Critical threshold isScheduled = 0 for at least 7 minutes Pod is not ready Setting Value Event type K8sPodSample SELECT value isReady Warning threshold Critical threshold isReady = 0 for at least 10 minutes Container is running out of space Setting Value Event type K8sContainerSample SELECT value fsUsedPercent Warning threshold > 75% for at least 5 minutes Critical threshold > 90% for at least 5 minutes ReplicaSet doesn't have desired amount of pods Setting Value Event type K8sReplicaSetSample SELECT value podsDesired - podsReady Warning threshold Critical threshold 0 for at least 5 minutes etcd open file descriptors Setting Value Event type K8sEtcdSample SELECT value (processOpenFds/processMaxFds)*100 Warning threshold > 75% for at least 3 minutes Critical threshold > 90% for at least 5 minutes Create new alert conditions To create new alert conditions based on Kubernetes metric data, see Understand and use data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 219.00739, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Kubernetes integration: Predefined alert policy", + "sections": "Kubernetes integration: Predefined alert policy", + "tags": "Kubernetes integration", + "body": ". The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. 
Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT" + }, + "id": "61fd3c6de7b9d279f45e6625" + }, + { + "sections": [ + "Configure control plane monitoring", + "Tip", + "Features", + "Compatibility and requirements", + "Control plane component", + "Autodiscovery and default configuration", + "hostNetwork and privileged", + "Custom autodiscovery", + "mTLS", + "Static endpoints", + "Limitations", + "Important", + "Control plane monitoring for managed and cloud environments", + "Monitoring control plane with integration version 2", + "Control plane monitoring on Integration version 2", + "Discovery of master nodes and control plane components", + "Configuration", + "etcd", + "API server", + "OpenShift configuration", + "OpenShift configuration on Integration version 2", + "Set up mTLS for etcd in OpenShift", + "See your data" + ], + "title": "Configure control plane monitoring", + "type": "docs", + "tags": [ + "Installation", + "Kubernetes integration", + "Integrations" + ], + "external_id": "33b7b8ae3dab9a2ca553dcf8ea0c97499478a85a", + "image": "https://docs.newrelic.com/static/209f301630c770f87ea8cbb1cace8e6e/8c557/new-relic-one-k8s-cluster-explorer-control-plane-parameters.png", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/configure-control-plane-monitoring/", + "published_at": "2022-02-15T19:15:25Z", + "updated_at": "2022-02-04T12:15:37Z", + "document_type": "page", + "popularity": 1, + "body": "New Relic provides control plane support for your Kubernetes integration, allowing you to monitor and collect metrics from your cluster's control plane components. That data can then be found in New Relic and used to create queries and charts. Tip Unless otherwise specified, this page refers to the Kubernetes integration v3. Details on how to configure control plane monitoring for v2 can be found in a specific section below. Features We monitor and collect metrics from the following control plane components: etcd: leader information, resident memory size, number of OS threads, consensus proposals data, etc. For a list of supported metrics, see etcd data. API server: rate of apiserver requests, breakdown of apiserver requests by HTTP method and response code, etc. For the complete list of supported metrics, see API server data. Scheduler: requested CPU/memory vs available on the node, tolerations to taints, any set affinity or anti-affinity, etc. For the complete list of supported metrics, see Scheduler data. Controller manager: resident memory size, number of OS threads created, goroutines currently existing, etc. For the complete list of supported metrics, see Controller manager data. Compatibility and requirements Control plane monitoring support is limited for managed clusters. This is because most cloud providers do not expose the metrics endpoints for the control plane components, so New Relic cannot access them. When deploying the solution in unprivileged mode, control plane setup will require extra steps and some caveats might apply. OpenShift 4.x uses control plane component metric endpoints that are different than the default. Control plane component The task of monitoring the Kubernetes control plane is a responsibility of the nrk8s-controlplane component, which by default is deployed as a DaemonSet. 
This component is automatically deployed to master nodes through a default list of nodeSelectorTerms that includes labels commonly used to identify master nodes, such as node-role.kubernetes.io/control-plane or node-role.kubernetes.io/master. Regardless, this selector is exposed in the values.yml file and therefore can be reconfigured to fit other environments. Clusters that do not have any node matching these selectors will not get any pod scheduled, thus not wasting any resources and being functionally equivalent to disabling control plane monitoring altogether by setting controlPlane.enabled to false in the Helm Chart. Each component of the control plane has a dedicated section, which allows you to individually: Enable or disable monitoring of that component Define specific selectors and namespaces for discovering that component Define the endpoints and paths that will be used to fetch metrics for that component Define the authentication mechanisms that need to be used to get metrics for that component Manually specify endpoints that skip autodiscovery completely Autodiscovery and default configuration By default, our Helm Chart ships a configuration that should work out of the box for some control plane components for on-premise distributions that run the control plane inside the cluster, such as Kubeadm or minikube. hostNetwork and privileged Most users and Kubernetes distributions configure the control plane metrics endpoints to listen only on the loopback interface, i.e. localhost. For this reason, the control plane component is deployed with hostNetwork: true by default when privileged is set to true (the default). When the integration is deployed using privileged: false, the hostNetwork setting for the control plane component will also be set to false. We chose to do it this way because otherwise, we would not be honoring the intent users have when they set privileged: false. Unfortunately, deploying without hostNetwork will cause control plane scraping to fail in most environments, which will result in missing metrics or the nrk8s-controlplane pods getting stuck in a CrashLoopBackOff state. This is a limitation of Kubernetes itself, as the control plane cannot be monitored without hostNetwork unless components are manually configured to do so. It is common, however, to deploy the integration in unprivileged mode (privileged: false) while still considering it acceptable to run the control plane pods with hostNetwork. This can be achieved by setting controlPlane.unprivilegedHostNetwork to true: this will tell the chart to deploy the control plane component with hostNetwork: true, despite the value of the higher-level privileged flag. If running pods with hostNetwork is not acceptable whatsoever, due to cluster or other policies, control plane monitoring is not possible and should be disabled by setting controlPlane.enabled to false. Custom autodiscovery Selectors used for autodiscovery are completely exposed as configuration entries in the values.yaml file, which means they can be tweaked or replaced to fit almost any environment where the control plane is run as a part of the cluster. An autodiscovery section looks like the following: autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system # Set to true to consider only pods sharing the node with the scraper pod. # This should be set to `true` if Kind is Daemonset, `false` otherwise. matchNode: true # Try to reach etcd using the following endpoints.
endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 - selector: \"k8s-app=etcd-manager-main\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer Copy The autodiscover section contains a list of autodiscovery entries. Each entry has: selector: A string-encoded label selector that will be used to look for pods. matchNode: If set to true, it will additionally limit discovery to pods running on the same node as the particular instance of the DaemonSet performing discovery. endpoints: A list of endpoints to try if a pod is found for the specified selector. Additionally, each endpoint has: url: URL to target, including scheme. Can be http or https. insecureSkipVerify: If set to true, the certificate will not be checked for https URLs. auth.type: Which mechanism to use to authenticate the request. Currently, the following methods are supported: None: If auth is not specified, the request will not contain any authentication whatsoever. bearer: The same bearer token used to authenticate against the Kubernetes API will be sent to this request. mtls: mTLS will be used to perform the request. mTLS For the mtls type, the following needs to be specified: endpoints: - url: https://localhost:4001 auth: type: mtls mtls: secretName: secret-name secretNamespace: secret-namespace Copy Where secret-name is the name of a Kubernetes TLS Secret, which lives in secret-namespace, and contains the certificate, key, and CA required to connect to that particular endpoint. The integration fetches this secret at runtime rather than mounting it, which means it requires an RBAC role granting it access to it. Our Helm Chart automatically detects auth.mtls entries at render time and will automatically create entries for these particular secrets and namespaces for you, unless rbac.create is set to false. Our integration accepts a secret with the following keys: cert: The PEM-encoded certificate that will be presented to etcd key: The PEM-encoded private key corresponding to the certificate above cacert: The PEM-encoded root CA used to verify the etcd server certificate These certificates should be signed by the same CA etcd is using to operate. How to generate these certificates is out of the scope of this documentation, as it will vary greatly between different Kubernetes distributions. Please refer to your distribution's documentation to see how to fetch the required etcd peer certificates. In Kubeadm, for example, they can be found in /etc/kubernetes/pki/etcd/peer.{crt,key} on the master node. Once you have located or generated the etcd peer certificates, you should rename the files to match the keys we expect to be present in the secret, and create the secret in the cluster bash Copy $ mv peer.crt cert $ mv peer.key key $ mv ca.crt cacert $ $ kubectl -n newrelic create secret generic newrelic-etcd-tls-secret --from-file=./cert --from-file=./key --from-file=./cacert Finally, you can input the secret name (newrelic-etcd-tls-secret) and namespace (newrelic) in the config snippet shown at the beginning of this section. Remember that the Helm Chart will automatically parse this config and create an RBAC role to grant access to this specific secret and namespace for the nrk8s-controlplane component, so there's no manual action needed in that regard.
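As a final check before redeploying (a sketch, assuming the secret name and namespace used in the example above), you can confirm the secret exists and contains the three expected keys: kubectl -n newrelic describe secret newrelic-etcd-tls-secret Copy The output should list cert, key, and cacert entries with non-zero sizes.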
Static endpoints While autodiscovery should cover cases where the control plane lives inside the Kubernetes cluster, some distributions or sophisticated Kubernetes environments run the control plane elsewhere, for a variety of reasons including availability or resource isolation. For these cases, the integration can be configured to scrape an arbitrary, fixed URL regardless of whether a pod with a control plane label is found on the node. This is done by specifying a staticEndpoint entry. For example, one for an external etcd instance would look like this: controlPlane: etcd: staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy staticEndpoint is the same type of entry as endpoints in the autodiscover entry, whose fields are described above. The same authentication mechanisms and schemas are supported here. Please keep in mind that if staticEndpoint is set, the autodiscover section will be ignored in its entirety. Limitations Important If you are using staticEndpoint pointing to an out-of-node (i.e. not localhost) endpoint, you must change controlPlane.kind from DaemonSet to Deployment. When using staticEndpoint, all nrk8s-controlplane pods will attempt to reach and scrape said endpoint. This means that, if nrk8s-controlplane is a DaemonSet (the default), all instances of the DaemonSet will scrape this endpoint. While this is fine if you are pointing them to localhost, if the endpoint is not local to the node you could potentially produce duplicate metrics and increase billable usage. If you are using staticEndpoint and pointing it to a non-local URL, make sure to change controlPlane.kind to Deployment. For the same reason as above, it is currently not possible to use autodiscovery for some control plane components, and a static endpoint for others. This is a known limitation we are working to address in future versions of the integration. Lastly, staticEndpoint only allows defining a single endpoint per component. This means that if you have multiple control plane shards on different hosts, it is currently not possible to point to them separately. This is also a known limitation we are working to address in future versions. For the time being, a workaround could be to aggregate metrics for different shards elsewhere, and point the staticEndpoint URL to the aggregated output. Control plane monitoring for managed and cloud environments Some cloud environments, like EKS or GKE, allow retrieving metrics from the Kubernetes API Server. This can be easily configured as a static endpoint: controlPlane: affinity: nodeAffinity: false # https://github.com/helm/helm/issues/9136 kind: Deployment config: etcd: enabled: false scheduler: enabled: false controllerManager: enabled: false apiServer: staticEndpoint: url: \"https://kubernetes.default:443\" insecureSkipVerify: true auth: type: bearer Copy Please note that this only applies to the API Server and that etcd, the scheduler, and the controller manager remain inaccessible in cloud environments. Monitoring control plane with integration version 2 This section covers how to configure control plane monitoring on versions 2 and earlier of the integration. Please note that these versions had less flexible autodiscovery options, and did not support external endpoints. We strongly recommend you update to version 3 at your earliest convenience. See what's changed in the Kubernetes integration.
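If you are unsure which version you are running, one way to check (a sketch; it assumes the Helm release is named newrelic and is installed in the newrelic namespace) is to list the release and inspect its chart version: helm list -n newrelic Copy As noted above, nri-bundle chart versions 3.x ship integration v2, while chart versions 4.x ship integration v3.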
Control plane monitoring on Integration version 2 Discovery of master nodes and control plane components The Kubernetes integration relies on the kubeadm labeling conventions to discover the master nodes and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master=\"\" or kubernetes.io/role=\"master\". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations and values: Component Label Endpoint API server Kubeadm / Kops / ClusterAPI k8s-app=kube-apiserver tier=control-plane component=kube-apiserver OpenShift app=openshift-kube-apiserver apiserver=true localhost:443/metrics by default (can be configured) if the request fails falls back to localhost:8080/metrics etcd Kubeadm / Kops / ClusterAPI k8s-app=etcd-manager-main tier=control-plane component=etcd OpenShift k8s-app=etcd localhost:4001/metrics Scheduler Kubeadm / Kops / ClusterAPI k8s-app=kube-scheduler tier=control-plane component=kube-scheduler OpenShift app=openshift-kube-scheduler scheduler=true localhost:10251/metrics Controller manager Kubeadm / Kops / ClusterAPI k8s-app=kube-controller-manager tier=control-plane component=kube-controller-manager​ OpenShift app=kube-controller-manager kube-controller-manager=true localhost:10252/metrics When the integration detects that it is running inside a master node, it tries to find which components are running on the node by looking for pods that match the labels listed in the table above. For every running component, the integration makes a request to its metrics endpoint. Configuration Control plane monitoring is automatic for agents running inside master nodes. The only component that requires an extra step to run is etcd, because it uses mutual TLS authentication (mTLS) for client requests. The API Server can also be configured to be queried using the Secure Port. Important Control plane monitoring for OpenShift 4.x requires additional configuration. For more information, see the OpenShift 4.x Configuration section. etcd In order to set mTLS for querying etcd, there are two configuration options that need to be set: Option Value ETCD_TLS_SECRET_NAME Name of a Kubernetes secret that contains the mTLS configuration. The secret should contain the following keys: cert: the certificate that identifies the client making the request. It should be signed by an etcd trusted CA. key: the private key used to generate the client certificate. cacert: the root CA used to identify the etcd server certificate. If the ETCD_TLS_SECRET_NAME option is not set, etcd metrics won't be fetched. ETCD_TLS_SECRET_NAMESPACE The namespace where the secret specified in the ETCD_TLS_SECRET_NAME was created. If not set, the default namespace is used. API server By default, the API server metrics are queried using the localhost:8080 unsecured endpoint. If this port is disabled, you can also query these metrics over the secure port. To enable this, set the following configuration option in the Kubernetes integration manifest file: Option Value API_SERVER_ENDPOINT_URL The (secure) URL to query the metrics. The API server uses localhost:443 by default Ensure that the ClusterRole has been updated to the newest version found in the manifest Added in version 1.15.0 Important Note that the port can be different according to the secure port used by the API server. 
For example, in Minikube the API server secure port is 8443 and therefore API_SERVER_ENDPOINT_URL should be set to https://localhost:8443. OpenShift configuration Version 3 of the Kubernetes Integration includes default settings that will autodiscover control plane components in OpenShift clusters, so it should work out of the box for all components except etcd. Etcd is not supported out of the box as the metrics endpoint is configured to require mTLS authentication in OpenShift environments. Our integration supports mTLS authentication to fetch etcd metrics in this configuration; however, you will need to create the required mTLS certificate manually. This is necessary to avoid granting wide permissions to our integration without the user's explicit approval. To create an mTLS secret, please follow the steps in this section below, and then configure the integration to use the newly created secret as described in the mtls section. OpenShift configuration on Integration version 2 Important When installing on OpenShift through Helm, specify the configuration to automatically include these endpoints. Setting openshift.enabled=true and openshift.version=\"4.x\" will include the secure endpoints and enable the /var/run/crio.sock runtime. Control plane components on OpenShift 4.x use endpoint URLs that require SSL and service-account-based authentication. Therefore, the default endpoint URLs cannot be used. To configure control plane monitoring on OpenShift, uncomment the following environment variables in the customized manifest. URL values are pre-configured to the default base URLs for the control plane monitoring metrics endpoints in OpenShift 4.x. - name: \"SCHEDULER_ENDPOINT_URL\" value: \"https://localhost:10259\" - name: \"ETCD_ENDPOINT_URL\" value: \"https://localhost:9979\" - name: \"CONTROLLER_MANAGER_ENDPOINT_URL\" value: \"https://localhost:10257\" - name: \"API_SERVER_ENDPOINT_URL\" value: \"https://localhost:6443\" Copy Important Even though the custom ETCD_ENDPOINT_URL is defined, etcd requires HTTPS and mTLS authentication to be configured. For more on configuring mTLS for etcd in OpenShift, see Set up mTLS for etcd in OpenShift. Set up mTLS for etcd in OpenShift Follow these instructions to set up mutual TLS authentication for etcd in OpenShift 4.x: Export the etcd client certificates from the cluster to an opaque secret. In a default managed OpenShift cluster, the secret is named kube-etcd-client-certs and it is stored in the openshift-monitoring namespace. bash Copy $ kubectl get secret kube-etcd-client-certs -n openshift-monitoring -o yaml > etcd-secret.yaml Open the secret file and change the keys: Rename the certificate authority to cacert. Rename the client certificate to cert. Rename the client key to key. Optionally, change the secret name and namespace to something meaningful. Remove these unnecessary keys in the metadata section: creationTimestamp resourceVersion selfLink uid Install the manifest with its new name and namespace: bash Copy $ kubectl apply -n newrelic -f etcd-secret.yaml Configure the integration to use the newly created secret as described in the mtls section. See your data If the integration has been set up correctly, the Kubernetes cluster explorer contains all the control plane components and their status in a dedicated section, as shown below. one.newrelic.com > Kubernetes Cluster Explorer: Use the Kubernetes cluster explorer to monitor and collect metrics from your cluster's Control Plane components.
You can also check for control plane data with this NRQL query: SELECT latest(timestamp) FROM K8sApiServerSample, K8sEtcdSample, K8sSchedulerSample, K8sControllerManagerSample FACET entityName where clusterName = '_MY_CLUSTER_NAME_' Copy Tip If you still can't see Control Plane data, try the solution described in Kubernetes integration troubleshooting: Not seeing data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 218.7926, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Static endpoints", + "tags": "Kubernetes integration", + "body": " and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master="" or kubernetes.io/role="master". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations" + }, + "id": "61fd18e9e7b9d2b5cc5e7358" } ], "/logstash-plugin-for-logs/fc6425fb-4fe9-40c7-be05-a27214cb5772": [ @@ -57270,7 +57220,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.30777, + "_score": 168.5256, "_version": null, "_explanation": null, "sort": null, @@ -57321,7 +57271,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.3023, + "_score": 168.52115, "_version": null, "_explanation": null, "sort": null, @@ -57379,7 +57329,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.08545, + "_score": 142.9581, "_version": null, "_explanation": null, "sort": null, @@ -57433,7 +57383,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 144.11217, + "_score": 136.87427, "_version": null, "_explanation": null, "sort": null, @@ -57474,7 +57424,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 143.57912, + "_score": 136.43945, "_version": null, "_explanation": null, "sort": null, @@ -57524,7 +57474,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.16167, + "_score": 80.60472, "_version": null, "_explanation": null, "sort": null, @@ -57566,7 +57516,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.07054, + "_score": 68.373886, "_version": null, "_explanation": null, "sort": null, @@ -57623,7 +57573,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 60.36673, + "_score": 59.388115, "_version": null, "_explanation": null, "sort": null, @@ -57635,6 +57585,48 @@ }, "id": "6043fa6c196a678ae2960f31" }, + { + "sections": [ + "Configure the infrastructure agent", + "Configuration methods and precedence", + "Configuration file (newrelic-infra.yml) location and description", + "Environment variable syntax and description", + "Using environment variable passthroughs with on-host integrations", + "Tip", + "Important", + "Configuration file structure", + "Configuration management tools" + ], + "title": "Configure the infrastructure agent", + "type": "docs", + "tags": [ + "Configuration", + "Install the infrastructure agent", + "Infrastructure" + ], + "external_id": "d0abc48b6fc9dfb8448a5c149138b37430643a39", + "image": 
"https://docs.newrelic.com/static/65588543c8b94a2a4bfb29712aeda7d8/c1b63/infrastructure-config-cascade-%2528orders-of-precedence%2529.png", + "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/configuration/configure-infrastructure-agent/", + "published_at": "2022-02-14T09:28:12Z", + "updated_at": "2022-02-04T02:37:59Z", + "document_type": "page", + "popularity": 1, + "body": "The infrastructure agent can be configured in a variety of ways. The default method is to edit the configuration variables in the newrelic-infra.yml file. A configuration file template is available in Infrastructure config file template. You can also override the config file by setting environment variables, or use config management tools, such as Ansible or Puppet. To configure on-host integrations that work with the infrastructure agent, see a specific integration's documentation. Want to try out our infrastructure agent? Create a New Relic account for free! No credit card required. Configuration methods and precedence The default method to configure the infrastructure agent is the newrelic-infra.yml config file. You can override the config file by setting environment variables, which is ideal for containerized environments. Environment variables override the config file. The config file overrides the agent defaults. Here are detailed descriptions of each configuration method: Configuration file (newrelic-infra.yml) location and description To configure the infrastructure agent, use the newrelic-infra.yml file. Its default location is: Linux: /etc/newrelic-infra.yml Windows: C:\\Program Files\\New Relic\\newrelic-infra\\newrelic-infra.yml For a sample config file, see our infrastructure config file template. Environment variable syntax and description To set an environment variable, use the variable name from the config file in all caps and prefix with NRIA_; for example, NRIA_LICENSE_KEY. Only use environment variables with containerized environments. The options described in the infrastructure config file template include their corresponding environment variables, labeled as Env var. Environment variables are also listed in Infrastructure configuration settings. Starting with infrastructure agent 1.14.0, environment variables can also be defined in the configuration file with the following notation {{MY_ENV_VAR}}, for example: license_key: {{MY_LICENSE_KEY}} Copy Using environment variable passthroughs with on-host integrations You can use environment variables to control config settings for on-host integrations, which can then be passed through to the infrastructure agent. Variables that can be used with each on-host integration are listed in the documentation for each integration, and can be set through one of the following methods. From inside the config file: add an entry to the infrastructure config file to allow the agent to get the environment variables you've set. passthrough_environment: - HTTPS_PROXY - HTTP_PROXY Copy From the command line: use another environment variable to pass through these settings. NRIA_PASSTHROUGH_ENVIRONMENT=\"HTTPS_PROXY,HTTP_PROXY\" Copy Each integration has a set of variables that can be overwritten by defining and passing an environment variable with the exact same name as the argument (for example, a PORT environment variable to replace the redis port argument). Starting with infrastructure agent 1.14.0, environment variables can be defined in the integrations config file with the {{}} notation. 
For example: integration_name: com.newrelic.redis instances: - name: redis-metrics command: metrics arguments: hostname: localhost port: 6379 password: {{MY_REDIS_PASSWORD}} Copy Tip Make sure the newrelic-infra service has visibility to any custom environment variable defined in the host. Service managers such as systemd will not set all env vars by default and might need additional steps. Important For a better understanding of configuration settings, see Infrastructure configuration settings, which contains a detailed explanation of each variable. Configuration file structure When editing newrelic-infra.yml, you must conform to the YAML format and adhere to the following rules: Do not modify the casing of the configuration options: YAML keys are case sensitive. Respect the indentation levels. All indentations are in similar increments, typically of two space characters. Data in the same stanza of the file must use the same level of indentation. Indent any sub-stanzas by an additional two spaces (see examples in Custom attributes and Network interface filters). A template of newrelic-infra.yml is available in the infrastructure agent repository. Use a validator to ensure the syntax is accurate before using the file with the infrastructure agent. Important Always restart the agent or your web server after changing settings. Configuration management tools To dynamically create your configuration file and manage the infrastructure agent with configuration management tools, see the documentation for: Ansible Chef Puppet", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 56.429726, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Configure the infrastructure agent", + "sections": "Configure the infrastructure agent", + "tags": "Install the infrastructure agent", + "body": "-infra.yml is available in the infrastructure agent repository. Use a validator to ensure the syntax is accurate before using the file with the infrastructure agent. Important Always restart the agent or your web server after changing settings. Configuration management tools To dynamically create your configuration file and manage the infrastructure agent with configuration management tools, see the documentation for: Ansible Chef Puppet" + }, + "id": "60440acbe7b9d24feb5799b0" + }, { "sections": [ "Use integration data in New Relic dashboards", @@ -57672,7 +57664,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39962, + "_score": 56.21028, "_version": null, "_explanation": null, "sort": null, @@ -57681,43 +57673,6 @@ "body": " with the Integrations SDK. For a general look at how to find and use integration data, see New Relic data types. 
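As a sketch of the "use a validator" advice above, a quick local YAML syntax check catches indentation and casing mistakes before you restart the agent. This assumes PyYAML and the default Linux config location; it only validates YAML syntax, not agent-specific setting names.

```python
# Sketch: syntax-check newrelic-infra.yml before restarting the agent.
# Catches YAML parse errors (bad indentation, unbalanced quotes),
# not invalid agent setting names.
import sys
import yaml  # PyYAML

PATH = "/etc/newrelic-infra.yml"  # default Linux location

try:
    with open(PATH) as f:
        config = yaml.safe_load(f)
except yaml.YAMLError as exc:
    sys.exit(f"{PATH} is not valid YAML: {exc}")

if not isinstance(config, dict):
    sys.exit(f"{PATH} parsed, but the top level is not a mapping")

print(f"{PATH} parsed OK with {len(config)} top-level settings")
```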
Get started with integration data Here are some tips for finding and exploring your integration data in New Relic: From the one.newrelic.com > Infrastructure > Third-party services page, select" }, "id": "617dad54196a6740e2f7df3f" - }, - { - "sections": [ - "Understand and use data from infrastructure integrations", - "Explore your infrastructure integration's data", - "EOL NOTICE", - "Create alert conditions" - ], - "title": "Understand and use data from infrastructure integrations", - "type": "docs", - "tags": [ - "Get started", - "Infrastructure integrations", - "Integrations" - ], - "external_id": "74fbfa8de2ee02bdf8dd4aad22fab7f654e96904", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/infrastructure-integrations/get-started/understand-use-data-infrastructure-integrations/", - "published_at": "2022-02-14T11:14:56Z", - "updated_at": "2022-02-14T11:14:56Z", - "document_type": "page", - "popularity": 1, - "body": "With our infrastructure integrations, you can monitor the performance of many popular services. Our infrastructure integrations are separated into two main categories: Cloud integrations: Integrations for cloud platform services, including AWS, Azure, and GCP. On-host integrations: \"On-host\" refers to core services integrations that you can install directly on a host. Examples: MySQL, NGINX, Kubernetes, Redis. Here are some tips on how to find, understand, and use data reported from infrastructure integrations. Explore your infrastructure integration's data The best way to understand infrastructure integrations's data and see what you can do with it is to enable an integration and explore the data in the New Relic UI. Some recommendations for exploring: View dashboards: You can find your dashboards in New Relic One. For details, see Integration dashboards. Query data: You can run custom queries and charts of your integration data. For more information, see Query New Relic data. Create alert conditions: See Alert conditions. Learn more about what metrics and inventory data an integration reports: See an integration's documentation for cloud integrations and on-host integrations. EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Create alert conditions To create an alert condition for integration data in infrastructure, Go to one.newrelic.com > Infrastructure, choose an integration, and then select an available alert option. For more information, see Infrastructure and alerts. You can also create alert conditions using NRQL queries.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39747, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Understand and use data from infrastructure integrations", - "sections": "Understand and use data from infrastructure integrations", - "tags": "Infrastructure integrations", - "body": "With our infrastructure integrations, you can monitor the performance of many popular services. Our infrastructure integrations are separated into two main categories: Cloud integrations: Integrations for cloud platform services, including AWS, Azure, and GCP. 
On-host integrations: "On-host" refers" - }, - "id": "617dc61d28ccbcceb080096e" } ], "/mlops-aporia/879a5e0d-eda0-4af9-aa73-08e49a8a46c8": [ @@ -57746,7 +57701,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 490.7266, + "_score": 485.8348, "_version": null, "_explanation": null, "sort": null, @@ -57787,7 +57742,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 379.0248, + "_score": 375.4058, "_version": null, "_explanation": null, "sort": null, @@ -57825,7 +57780,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 354.65588, + "_score": 349.952, "_version": null, "_explanation": null, "sort": null, @@ -57861,7 +57816,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 348.75412, + "_score": 344.2619, "_version": null, "_explanation": null, "sort": null, @@ -57896,7 +57851,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 330.00595, + "_score": 325.6102, "_version": null, "_explanation": null, "sort": null, @@ -57941,7 +57896,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -57983,7 +57938,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -58025,7 +57980,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -58067,7 +58022,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -58106,7 +58061,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -58144,7 +58099,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.48205, + "_score": 97.34163, "_version": null, "_explanation": null, "sort": null, @@ -58181,7 +58136,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.07785, + "_score": 78.526474, "_version": null, "_explanation": null, "sort": null, @@ -58208,7 +58163,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 78.352554, + "_score": 78.03318, "_version": null, "_explanation": null, "sort": null, @@ -58217,6 +58172,52 @@ }, "id": "604cc036e7b9d2dc8e309501" }, + { + "sections": [ + "Python custom metrics", + "Important", + "Charting custom metrics", + "Push versus pull interfaces", + "Recording a single metric", + "Recording multiple metrics", + "Naming of custom metrics", + "Pre-aggregated metrics", + "Custom metric data sources", + "Registering a data source", + "Initialization of a data source", + "Instances of a data source", + "Life cycle of a data source", + "Configuring a data source", + "Setup from configuration file" + ], + "title": "Python custom metrics", + "type": "docs", + "tags": [ + "Supported features", + "Python agent", + "Agents" + ], + "external_id": 
"f9a53099356096d43e66677875a5d909a2fbf340", + "image": "", + "url": "https://docs.newrelic.com/docs/apm/agents/python-agent/supported-features/python-custom-metrics/", + "published_at": "2022-02-14T16:18:18Z", + "updated_at": "2021-10-30T21:56:47Z", + "document_type": "page", + "popularity": 1, + "body": "Custom metrics allow you to record arbitrary metrics using APIs provided by the Python agent. These may be used to record metrics related to the business functions implemented by your web application, or may be additional metrics used to evaluate the performance of the web application. Recommendation: To avoid potential data problems, keep the total number of unique metrics introduced by custom metrics under 2000. Important Before using custom metrics, you must get the agent initialized and integrated with the target process. For instructions, see Python agent integration. Charting custom metrics To view custom metrics, query your data to search metrics and create customizable charts. Push versus pull interfaces The Python agent provides two different ways of recording custom metrics. The first is a push-style API where you can decide when to record a custom metric. The second is a pull-style API where you register a custom metric data source, and the agent polls your code for metrics once per harvest cycle. The pull-style API is important where you need to generate rate or utilization metrics over the period of the harvest cycle. This is because you can properly calculate the duration of the harvest cycle and also ensure that only one metric is recorded for the harvest cycle. Recording a single metric To record a single custom metric, the Python agent provides the function: newrelic.agent.record_custom_metric(name, value, application=None) Copy When called without an application object as newrelic.agent.record_custom_metric('Custom/Value', value) Copy then it must be called within the context of a transaction that is being monitored by the agent. This is because the current transaction will be looked up and the custom metrics will initially be attached to that transaction. So long as the transaction is not subsequently marked to be ignored, the custom metrics will then be aggregated with other metrics for the application the transaction is being reported to, when the transaction completes. If this API function is called outside of the context of a monitored transaction, such as in a background thread (which isn't being tracked as a background task), then the call does nothing and the data is discarded. In order to be able to record custom metrics in such a situation, it is necessary to supply the application object corresponding to the application against which the custom metrics should be recorded. application = newrelic.agent.register_application() def report_custom_metrics(): while True: newrelic.agent.record_custom_metric('Custom/Value', value(), application) time.sleep(60.0) thread = threading.Thread(target=report_custom_metrics) thread.setDaemon(True) thread.start() Copy In the case of recording custom metrics against the current transaction (by not supplying an application object), no thread locking is required at the time of the API call, as the custom metrics will be attached to the transaction object initially. It is only when the whole transaction is being recorded at completion that a thread lock needs to be acquired. This is the same lock though as needs to be acquired to merge all metrics from the transaction with the metric table for the current harvest cycle. 
So, no additional locking is required on top of what is already required. Where the API call is supplied the application object, however, it is necessary to acquire a lock for each call to record a custom metric. Recording metrics one at a time in this way for a large number of metrics may therefore have undue effects due to thread lock contention. Recording multiple metrics If you are recording multiple metrics in one go, to reduce the need for thread locking you can instead use the function: newrelic.agent.record_custom_metrics(metrics, application=None) Copy This works the same way as the record_custom_metric() call except that an iterable can be provided in place of the name and value arguments. The iterable can be a list, tuple, or other iterable object, including a generator function. The iterable must return a tuple consisting of the name and value for the custom metric. import psutil import os def memory_metrics(): pid = os.getpid() p = psutil.Process(os.getpid()) m = p.get_memory_info() yield ('Custom/Memory/Physical', float(m.rss)/(1024*1024)) yield ('Custom/Memory/Virtual', float(m.vms)/(1024*1024)) application = newrelic.agent.register_application() def report_custom_metrics(): while True: newrelic.agent.record_custom_metrics(memory_metrics(), application) time.sleep(60.0) thread = threading.Thread(target=report_custom_metrics) thread.setDaemon(True) thread.start() Copy When used with an application object, no matter how many custom metrics are being recorded, thread locking will only need to be performed once for each call. Naming of custom metrics All custom metrics reported by the Python agent should start with the prefix Custom/. This would typically be followed with a category name and label segment. If the Custom/ prefix is not used, then the custom metrics may not be available for selection in the Data explorer. Pre-aggregated metrics When recording a set of metrics by passing an iterable over the set of available metrics, the same named metric may appear more than once. In this situation the agent would then aggregate the individual values into one sample. If retaining and then later passing all the individual raw samples for a single metric in this way is not practical, the source of the metrics can instead pre-aggregate the metrics and provide the resulting aggregated data sample. In that case, instead of a numerical value, a dictionary would be passed for the value. The fields within the dictionary would be: count total min max sum_of_squares An implementation of a helper class that you could use to perform aggregation for a single metric is: class Stats(dict): def __init__(self, count=0, total=0.0, min=0.0, max=0.0, sum_of_squares=0.0): self.count = count self.total = total self.min = min self.max = max self.sum_of_squares = sum_of_squares def __setattr__(self, name, value): self[name] = value def __getattr__(self, name): return self[name] def merge_stats(self, other): self.total += other.total self.min = self.count and min(self.min, other.min) or other.min self.max = max(self.max, other.max) self.sum_of_squares += other.sum_of_squares self.count += other.count def merge_value(self, value): self.total += value self.min = self.count and min(self.min, value) or value self.max = max(self.max, value) self.sum_of_squares += value ** 2 self.count += 1 Copy This class is itself a dictionary and so an instance of it can be passed directly as the value.
This might then be used as: application = newrelic.agent.register_application() def sample_value(): return ... def report_custom_metrics(): count = 0 stats = Stats() while True: count += 1 stats.merge_value(sample_value()) if count % 60 == 0: newrelic.agent.record_custom_metric('Custom/Value', stats, application) stats = Stats() time.sleep(1.0) thread = threading.Thread(target=report_custom_metrics) thread.setDaemon(True) thread.start() Copy Custom metric data sources The record_custom_metric() and record_custom_metrics() API calls still require explicit action on your part to push custom metrics to the agent. Pushing data to the agent can be problematic though, especially if it is being done from a background thread on a 60-second interval. This is because when the data is pushed it may not sync precisely with when the agent is reporting data back to the data collector. If a background thread were pre-aggregating metrics over a 60-second period and then recording them, and that falls close to the time when the agent is reporting data, the recording could occur either just before or just after the agent reports the data. This lack of synchronization in time could therefore result in no metrics for that sample being reported in one harvest cycle and two in the next, whereas the intent would be that there is one per harvest cycle. The solution to this is for the agent to pull custom metrics from the producer of the metrics as part of the process of reporting data, to ensure they will be reported immediately and synchronised with the harvest cycle. The source of such metrics in this pull-style API is called a metric data source. Registering a data source The API function for registering a metric data source is: newrelic.agent.register_data_source(source, application=None, name=None, settings=None, **properties) Copy Because of varying requirements around how custom metrics may need to be produced, a number of different ways of implementing the data source are available. The simplest type of data source is one which provides a gauge metric: one where some value at that particular point in time is relevant and what has happened historically doesn't matter. import psutil import os @newrelic.agent.data_source_generator(name='Memory Usage') def memory_metrics(): pid = os.getpid() p = psutil.Process(os.getpid()) m = p.get_memory_info() yield ('Custom/Memory/Physical', float(m.rss)/(1024*1024)) yield ('Custom/Memory/Virtual', float(m.vms)/(1024*1024)) newrelic.agent.register_data_source(memory_metrics) Copy The decorator used here is: newrelic.agent.data_source_generator(name=None, **properties) Copy It is specifically for wrapping a generator function, or a function which otherwise returns an iterable when called. The name when registering a data source is optional. It exists mainly so that when logging errors the message can give a more recognisable name for the data source. If name isn't passed to register_data_source(), then any name associated with the actual data source using the decorator will be used instead, or the name of the function if the data source itself is not named. If an application object is not provided when registering a data source, then the data source will be automatically associated with all applications for which data is being reported by the agent in that process. If an application is provided, the data source will only be associated with that specific application.
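To make the two association modes just described concrete, here is a minimal sketch contrasting them, reusing the memory_metrics data source from above; the application name is hypothetical.

```python
# Sketch: the two ways of associating a data source, per the text above.
import newrelic.agent

# 1. No application object: the data source is associated with every
#    application the agent reports for in this process.
newrelic.agent.register_data_source(memory_metrics)

# 2. Explicit application object: the data source is associated only
#    with this application ('My Service' is a hypothetical name).
application = newrelic.agent.register_application(name='My Service')
newrelic.agent.register_data_source(memory_metrics, application=application)
```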
Whether a data source is registered against an application explicitly or is applied to all applications, the agent needs to first be registered for that application. This would normally happen if using a data source in an existing web application process which was being monitored. If however you are using a data source in a standalone program to report only custom metrics, you still need to ensure that the API call register_application() is used if necessary to force the registration of the agent for an application before any data will be collected. Initialization of a data source Although the decorator provides the ability to name a data source, the more important reason for the decorator is that it hides the complexity of a sequence of setup steps to get a data source running. The sequence of these steps is: The data source is initialized, with a dictionary holding any configuration being passed to it to set it up to run in a particular way. Upon being initialized, the data source returns a dictionary of properties describing the data source. This includes a reference to a factory function for creating a specific instance of the data source provider. An instance of the data source provider is then created for a specific consumer (application) by calling the factory. The factory function is passed a dictionary describing the environment in which it is running, including the name of the consumer. Rewriting the above example so as to not rely on the decorator, we would have: import os import psutil def memory_metrics_data_source(settings): def memory_metrics(): pid = os.getpid() p = psutil.Process(os.getpid()) m = p.get_memory_info() yield ('Custom/Memory/Physical', float(m.rss)/(1024*1024)) yield ('Custom/Memory/Virtual', float(m.vms)/(1024*1024)) def memory_metrics_factory(environ): return memory_metrics properties = {} properties['name'] = 'Memory Usage' properties['factory'] = memory_metrics_factory return properties newrelic.agent.register_data_source(memory_metrics_data_source) Copy The purpose of the more complex underlying protocol is to provide sufficient hook points to properly initialize data sources and customise them based on that configuration and the specifics of the consumer. Instances of a data source Nothing more needed to be done in the prior example because gauge metrics, which don't care about the last time they were generated, were being returned. Where a metric reflects something happening over time, and therefore needs to retain some state, we need though an ability to be able to create an instance of the data source. The factory function therefore provides the ability for an instance of a data source to be created for each application against which metrics are being reported. There is allowance for one instance of the data source per application rather than one per process, because the start and end times for the harvest cycle for different applications may be different. If there was only one per process in this scenario and the metric had a connection to the duration of the harvest cycle, then the resulting metrics wouldn't be correct for each application. The ability is therefore provided for a data source instance to be application specific. Using nested functions as above, a data source which needs to maintain state could therefore be written as. 
import os import time import multiprocessing @newrelic.agent.data_source_factory(name='CPU Usage') def cpu_metrics_data_source(settings, environ): state = {} state['last_timestamp'] = time.time() state['times'] = os.times() def cpu_metrics(): now = time.time() new_times = os.times() elapsed_time = now - state['last_timestamp'] user_time = new_times[0] - state['times'][0] utilization = user_time / (elapsed_time*multiprocessing.cpu_count()) state['last_timestamp'] = now state['times'] = new_times yield ('Custom/CPU/User Time', user_time) yield ('Custom/CPU/User/Utilization', utilization) return cpu_metrics newrelic.agent.register_data_source(cpu_metrics_data_source) Copy The decorator used here is: newrelic.agent.data_source_factory(name=None, **properties) Copy For this case the decorator is wrapping a factory function. Because the decorator is automatically returning the properties for the data source when required, the factory takes both the settings and the description of the environ it is being used in. Using nested functions is a bit magic and requires the code to use a dictionary on the stack of the outer function to hold the state. The alternative is to implement the data source as an actual class with the decorator applied to the class. import os import time import multiprocessing @newrelic.agent.data_source_factory(name='CPU Usage') class CPUMetricsDataSource(object): def __init__(self, settings, environ): self.last_timestamp = time.time() self.times = os.times() def __call__(self): now = time.time() new_times = os.times() elapsed_time = now - self.last_timestamp user_time = new_times[0] - self.times[0] utilization = user_time / (elapsed_time*multiprocessing.cpu_count()) self.last_timestamp = now self.times = new_times yield ('Custom/CPU/User Time', user_time) yield ('Custom/CPU/User/Utilization', utilization) newrelic.agent.register_data_source(CPUMetricsDataSource) Copy Life cycle of a data source Although a data source could produce metrics at any time, the agent itself isn't always reporting metrics for an application. Specifically, it will only start collecting metrics and report them once the agent has managed to register itself with the data collector for a specific application. This distinction is important for data sources which generate metrics based on a time period. It would be required to only have metrics produced by a data source to cover the period back to the point at which registration occurred, or back to the last time that metrics were reported by the agent. If this isn't done, the reported metrics will not align and so it will not be possible to ensure that they correlate properly with metrics from tracking of web transactions or background tasks. For this reason, the factory for a data source will only be called to create an instance of the data source when registration for the application has completed and metrics collection started. This ensures that any reference timestamp will be correct. If the agent run for a particular application is terminated, due to a server side forced restart resulting from server side configuration changes, or because of successive failures to report data to the data collector, then the data source will be dropped. A new instance of the data source will then be created when the agent has been able to reregister itself again for the application. The correct cleanup of a data source in this case will depend on prompt destruction of the data source object when it is dropped. 
Because of object reference count cycles, this cannot be relied upon. It is also desirable to avoid a data source needing to add a __del__() method in order to trigger cleanup actions because of the problems that a __del__() method introduces in the way of actually preventing prompt destruction of the object. For this reason, if a data source needs more control over setup and shutdown, including perhaps being able to stay persistent in memory and not be dropped, yet suspend calculations for metrics, then it can provide start() and stop() methods when being implemented as a class instance. import os import time import multiprocessing @newrelic.agent.data_source_factory(name='CPU Usage') class CPUMetricsDataSource(object): def __init__(self, settings, environ): self.last_timestamp = None self.times = None def start(self): self.last_timestamp = time.time() self.times = os.times() def stop(self): self.last_timestamp = None self.times = None def __call__(self): if self.times is None: return now = time.time() new_times = os.times() elapsed_time = now - self.last_timestamp user_time = new_times[0] - self.times[0] utilization = user_time / (elapsed_time*multiprocessing.cpu_count()) self.last_timestamp = now self.times = new_times yield ('CPU/User Time', user_time) yield ('CPU/User/Utilization', utilization) newrelic.agent.register_data_source(CPUMetricsDataSource) Copy With the start() and stop() methods defined, the instance of the data source will not be destroyed at the termination of the agent run but kept around. The agent at this point is then expecting that the data source will itself deal with the suspension of any aggregation of metrics, dropping any accumulated metrics and ensure that when the agent reregisters the application with the data collector and calls start() again, only then would tracking for metrics be resumed. Configuring a data source Data sources may not always be bound to one specific information source. It may be necessary to register a data source against different underlying information sources from which metrics are generated. In this case distinct settings can be passed when registering a data source using the register_data_source() function. When using a data factory, these settings will then be available when the data source is being initialized. @newrelic.agent.data_source_factory() class HostMonitorDataSource(object): def __init__(self, settings, environ): self.hostname = settings['hostname'] def __call__(self): ... newrelic.agent.register_data_source(HostMonitorDataSource, name='Host Monitor (host-1)', settings=dict(hostname='host-1')) newrelic.agent.register_data_source(HostMonitorDataSource, name='Host Monitor (host-2)', settings=dict(hostname='host-2')) Copy If provision of settings is optional, the data source should only attempt to access settings if the settings option is not None. Even if supplied a dictionary, it should also cope with missing settings in the dictionary. Setup from configuration file Although the examples here showed the use of the register_data_source() API call, this would not be the normal way by which data sources would be registered. This is not the preferred way as it would require modifications to the application to import the module for the data source and register it. Instead, the primary way for defining and integrating data sources into an existing monitored web application would be to list them in the agent configuration file. 
This entails adding an additional section in the agent configuration file for each data source with prefix data-source:: [data-source:process-info] enabled = true function = samplers.process_info:process_info_data_source Copy If registering a data source from the agent configuration file, there should be no separate registration for the same data source being performed using the register_data_source() function occuring in your application code or in the module defining the data source. If there is, then two instances of the data source would end up being registerd. If needing to provide specific settings for a data source, this can be done by creating a separate section in the agent configuration file and referring to the section name in the settings value in the data source configuration. [data-source:host-monitor] enabled = true function = samplers.process_info:process_info_data_source name = Host Monitor (host-1) settings = host-monitor:host-1 [host-monitor:host-1] hostname = host-1 Copy As data source settings supplied via the configuration file will always be passed as string values, it is recommended that even when using register_data_source() with application code to register a data source and provide settings explicitly, that strings be used for setting values. The data source should then deal with the conversion to a different type such as a numeric value or list of values.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 66.135, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "Supported features", + "body": " transaction, such as in a background thread (which isn't being tracked as a background task), then the call does nothing and the data is discarded. In order to be able to record custom metrics in such a situation, it is necessary to supply the application object corresponding to the application against" + }, + "id": "617dbf9f196a67adbbf7dfa4" + }, { "sections": [ "Get started with Incident Intelligence", @@ -58267,7 +58268,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.84017, + "_score": 66.132385, "_version": null, "_explanation": null, "sort": null, @@ -58276,52 +58277,6 @@ "body": ". Copy the URL, and paste it into your new Grafana webhook. PagerDuty EOL NOTICE As of October 2021, we've discontinued support for several capabilities with PagerDuty, including suggested responders, golden signals, and component enrichment. 
For more details, including how you can easily make" }, "id": "603ea62e64441f119f4e883f" - }, - { - "sections": [ - "Python custom metrics", - "Important", - "Charting custom metrics", - "Push versus pull interfaces", - "Recording a single metric", - "Recording multiple metrics", - "Naming of custom metrics", - "Pre-aggregated metrics", - "Custom metric data sources", - "Registering a data source", - "Initialization of a data source", - "Instances of a data source", - "Life cycle of a data source", - "Configuring a data source", - "Setup from configuration file" - ], - "title": "Python custom metrics", - "type": "docs", - "tags": [ - "Supported features", - "Python agent", - "Agents" - ], - "external_id": "f9a53099356096d43e66677875a5d909a2fbf340", - "image": "", - "url": "https://docs.newrelic.com/docs/apm/agents/python-agent/supported-features/python-custom-metrics/", - "published_at": "2022-02-14T16:18:18Z", - "updated_at": "2021-10-30T21:56:47Z", - "document_type": "page", - "popularity": 1, - "body": "Custom metrics allow you to record arbitrary metrics using APIs provided by the Python agent. These may be used to record metrics related to the business functions implemented by your web application, or may be additional metrics used to evaluate the performance of the web application. Recommendation: To avoid potential data problems, keep the total number of unique metrics introduced by custom metrics under 2000. Important Before using custom metrics, you must get the agent initialized and integrated with the target process. For instructions, see Python agent integration. Charting custom metrics To view custom metrics, query your data to search metrics and create customizable charts. Push versus pull interfaces The Python agent provides two different ways of recording custom metrics. The first is a push-style API where you can decide when to record a custom metric. The second is a pull-style API where you register a custom metric data source, and the agent polls your code for metrics once per harvest cycle. The pull-style API is important where you need to generate rate or utilization metrics over the period of the harvest cycle. This is because you can properly calculate the duration of the harvest cycle and also ensure that only one metric is recorded for the harvest cycle. Recording a single metric To record a single custom metric, the Python agent provides the function: newrelic.agent.record_custom_metric(name, value, application=None) Copy When called without an application object as newrelic.agent.record_custom_metric('Custom/Value', value) Copy then it must be called within the context of a transaction that is being monitored by the agent. This is because the current transaction will be looked up and the custom metrics will initially be attached to that transaction. So long as the transaction is not subsequently marked to be ignored, the custom metrics will then be aggregated with other metrics for the application the transaction is being reported to, when the transaction completes. If this API function is called outside of the context of a monitored transaction, such as in a background thread (which isn't being tracked as a background task), then the call does nothing and the data is discarded. In order to be able to record custom metrics in such a situation, it is necessary to supply the application object corresponding to the application against which the custom metrics should be recorded. 
application = newrelic.agent.register_application() def report_custom_metrics(): while True: newrelic.agent.record_custom_metric('Custom/Value', value(), application) time.sleep(60.0) thread = threading.Thread(target=report_custom_metrics) thread.setDaemon(True) thread.start() Copy In the case of recording custom metrics against the current transaction (by not supplying an application object), no thread locking is required at the time of the API call, as the custom metrics will be attached to the transaction object initially. It is only when the whole transaction is being recorded at completion that a thread lock needs to be acquired. This is the same lock though as needs to be acquired to merge all metrics from the transaction with the metric table for the current harvest cycle. So, no additional locking is required on top of what is already required. Where the API call is being supplied the application object however, it is necessary to acquire a lock for each call to record a custom metric. Recording metrics one at a time in this way for a large number of metrics may therefore have undue effects due to thread lock contention. Recording multiple metrics If you are recording multiple metrics in one go, to reduce the need for thread locking you can instead use the function: newrelic.agent.record_custom_metrics(metrics, application=None) Copy This works the same way as the record_custom_metric() call except that an iterable can be provided in place of the name and value arguments. The iterable can be a list, tuple or other iterable object, including a generator function. The iterable must return a tuple consisting of the name and value for the custom metric. import psutil import os def memory_metrics(): pid = os.getpid() p = psutil.Process(os.getpid()) m = p.get_memory_info() yield ('Custom/Memory/Physical', float(m.rss)/(1024*1024)) yield ('Custom/Memory/Virtual', float(m.vms)/(1024*1024)) application = newrelic.agent.register_application() def report_custom_metrics(): while True: newrelic.agent.record_custom_metrics(memory_metrics(), application) time.sleep(60.0) thread = threading.Thread(target=report_custom_metrics) thread.setDaemon(True) thread.start() Copy When used with an application object, no matter how many custom metrics are being recorded, thread locking will only need to be performed once for each call. Naming of custom metrics All custom metrics reported by the Python agent should start with the prefix Custom/. This would typically be followed with a category name and label segment. If the Custom/ metric is not used, then the custom metrics may not be available for selection in the Data explorer. Pre-aggregated metrics When recording a set of metrics by passing an iterable over the set of available metrics, the same named metric may appear more than once. In this situation the agent would then aggregate the indvidual values into one sample. Although possible, if retaining and then later passing all the individual raw samples for a single metric in this way is not practical, then the source of the metrics can instead pre aggregate metrics and provide the resulting aggregrated data sample. Instead therefore of the value being a numerical value, a dictionary would be passed for the value. 
The fields within the dictionary would be: count total min max sum_of_squares An implementation of a helper class that you could use to perform aggregation for a single metric is: class Stats(dict): def __init__(self, count=0, total=0.0, min=0.0, max=0.0, sum_of_squares=0.0): self.count = count self.total = total self.min = min self.max = max self.sum_of_squares = sum_of_squares def __setattr__(self, name, value): self[name] = value def __getattr__(self, name): return self[name] def merge_stats(self, other): self.total += other.total self.min = self.count and min(self.min, other.min) or other.min self.max = max(self.max, other.max) self.sum_of_squares += other.sum_of_squares self.count += other.count def merge_value(self, value): self.total += value self.min = self.count and min(self.min, value) or value self.max = max(self.max, value) self.sum_of_squares += value ** 2 self.count += 1 Copy This class is itself a dictionary and so an instance of it can be passed directly as the value. This might then be used as: application = newrelic.agent.register_application() def sample_value(): return ... def report_custom_metrics(): count = 0 stats = Stats() while True: count += 1 stats.merge_value(sample_value()) if count % 60 == 0: newrelic.agent.record_custom_metric('Custom/Value', stats, application) stats = Stats() time.sleep(1.0) thread = threading.Thread(target=report_custom_metrics) thread.setDaemon(True) thread.start() Copy Custom metric data sources The record_custom_metric() and record_custom_metrics() API calls still require explicit action on your part to push custom metrics to the agent. Pushing data to the agent, especially if being done from a background thread and done on a 60 second interval, can be problematic though. This is because when the data is pushed it may not sync precisely with when the agent is reporting data back to the data collector. If a background thread was pre aggregating metrics over a 60 second period and then recording them, if that falls close to the time when the agent is reporting data, it could occur either just before or just after the agent reports the data. This lack of synchronization in time could therefore result in no metrics for that sample being reported in one harvest cycle and two in the next, where as the intent would be that there is one per harvest cycle. The solution to this is for the agent to pull custom metrics from the producer of the metrics as part of the process of reporting data to ensure they will be reported immediately and synchronised with the harvest cycle. The source of such metrics in this pull-style API is called a metric data source. Registering a data source The API function for registering a metric data source is: newrelic.agent.register_data_source(source, application=None, name=None, settings=None, **properties) Copy Because of varying requirements around how custom metrics may need to be produced, a number of different ways are available of implementing the data source. The simplest type of data source is one which is providing a gauge metric. That is one where some value at that particular point in time is relevant and what has happened historically doesn't matter. 
import psutil import os @newrelic.agent.data_source_generator(name='Memory Usage') def memory_metrics(): pid = os.getpid() p = psutil.Process(os.getpid()) m = p.get_memory_info() yield ('Custom/Memory/Physical', float(m.rss)/(1024*1024)) yield ('Custom/Memory/Virtual', float(m.vms)/(1024*1024)) newrelic.agent.register_data_source(memory_metrics) Copy The decorator used here is: newrelic.agent.data_source_generator(name=None, **properties) Copy It is specifically for wrapping a generator function, or a function which otherwise returns an iterable when called. The name when registering a data source is optional. It exists mainly so that when logging errors the message can give a more recognisable name for the data source. If name isn't passed to register_data_source(), then any name associated with the actual data source using the decorator will be used instead, or the name of the function if the data source itself is not named. If an application object is not provided when registering a data source, then the data source will be automatically associated with all applications for which data is being reported by the agent in that process. If an application is provided, the data source will only be associated with that specific application. Whether a data source is registered against an application explicitly or is applied to all applications, the agent needs to first be registered for that application. This would normally happen if using a data source in an existing web application process which was being monitored. If however you are using a data source in a standalone program to report only custom metrics, you still need to ensure that the API call register_application() is used if necessary to force the registration of the agent for an application before any data will be collected. Initialization of a data source Although the decorator provides the ability to name a data source, the more important reason for the decorator is that it hides the complexity of a sequence of setup steps to get a data source running. The sequence of these steps is: The data source is initialized, with a dictionary holding any configuration being passed to it to set it up to run in a particular way. Upon being initialized, the data source returns a dictionary of properties describing the data source. This includes a reference to a factory function for creating a specific instance of the data source provider. An instance of the data source provider is then created for a specific consumer (application) by calling the factory. The factory function is passed a dictionary describing the environment in which it is running, including the name of the consumer. Rewriting the above example so as to not rely on the decorator, we would have: import os import psutil def memory_metrics_data_source(settings): def memory_metrics(): pid = os.getpid() p = psutil.Process(os.getpid()) m = p.get_memory_info() yield ('Custom/Memory/Physical', float(m.rss)/(1024*1024)) yield ('Custom/Memory/Virtual', float(m.vms)/(1024*1024)) def memory_metrics_factory(environ): return memory_metrics properties = {} properties['name'] = 'Memory Usage' properties['factory'] = memory_metrics_factory return properties newrelic.agent.register_data_source(memory_metrics_data_source) Copy The purpose of the more complex underlying protocol is to provide sufficient hook points to properly initialize data sources and customise them based on that configuration and the specifics of the consumer. 
Instances of a data source Nothing more needed to be done in the prior example because gauge metrics, which don't care about the last time they were generated, were being returned. Where a metric reflects something happening over time, and therefore needs to retain some state, we need though an ability to be able to create an instance of the data source. The factory function therefore provides the ability for an instance of a data source to be created for each application against which metrics are being reported. There is allowance for one instance of the data source per application rather than one per process, because the start and end times for the harvest cycle for different applications may be different. If there was only one per process in this scenario and the metric had a connection to the duration of the harvest cycle, then the resulting metrics wouldn't be correct for each application. The ability is therefore provided for a data source instance to be application specific. Using nested functions as above, a data source which needs to maintain state could therefore be written as. import os import time import multiprocessing @newrelic.agent.data_source_factory(name='CPU Usage') def cpu_metrics_data_source(settings, environ): state = {} state['last_timestamp'] = time.time() state['times'] = os.times() def cpu_metrics(): now = time.time() new_times = os.times() elapsed_time = now - state['last_timestamp'] user_time = new_times[0] - state['times'][0] utilization = user_time / (elapsed_time*multiprocessing.cpu_count()) state['last_timestamp'] = now state['times'] = new_times yield ('Custom/CPU/User Time', user_time) yield ('Custom/CPU/User/Utilization', utilization) return cpu_metrics newrelic.agent.register_data_source(cpu_metrics_data_source) Copy The decorator used here is: newrelic.agent.data_source_factory(name=None, **properties) Copy For this case the decorator is wrapping a factory function. Because the decorator is automatically returning the properties for the data source when required, the factory takes both the settings and the description of the environ it is being used in. Using nested functions is a bit magic and requires the code to use a dictionary on the stack of the outer function to hold the state. The alternative is to implement the data source as an actual class with the decorator applied to the class. import os import time import multiprocessing @newrelic.agent.data_source_factory(name='CPU Usage') class CPUMetricsDataSource(object): def __init__(self, settings, environ): self.last_timestamp = time.time() self.times = os.times() def __call__(self): now = time.time() new_times = os.times() elapsed_time = now - self.last_timestamp user_time = new_times[0] - self.times[0] utilization = user_time / (elapsed_time*multiprocessing.cpu_count()) self.last_timestamp = now self.times = new_times yield ('Custom/CPU/User Time', user_time) yield ('Custom/CPU/User/Utilization', utilization) newrelic.agent.register_data_source(CPUMetricsDataSource) Copy Life cycle of a data source Although a data source could produce metrics at any time, the agent itself isn't always reporting metrics for an application. Specifically, it will only start collecting metrics and report them once the agent has managed to register itself with the data collector for a specific application. This distinction is important for data sources which generate metrics based on a time period. 
It would be required to only have metrics produced by a data source to cover the period back to the point at which registration occurred, or back to the last time that metrics were reported by the agent. If this isn't done, the reported metrics will not align and so it will not be possible to ensure that they correlate properly with metrics from tracking of web transactions or background tasks. For this reason, the factory for a data source will only be called to create an instance of the data source when registration for the application has completed and metrics collection started. This ensures that any reference timestamp will be correct. If the agent run for a particular application is terminated, due to a server side forced restart resulting from server side configuration changes, or because of successive failures to report data to the data collector, then the data source will be dropped. A new instance of the data source will then be created when the agent has been able to reregister itself again for the application. The correct cleanup of a data source in this case will depend on prompt destruction of the data source object when it is dropped. Because of object reference count cycles, this cannot be relied upon. It is also desirable to avoid a data source needing to add a __del__() method in order to trigger cleanup actions because of the problems that a __del__() method introduces in the way of actually preventing prompt destruction of the object. For this reason, if a data source needs more control over setup and shutdown, including perhaps being able to stay persistent in memory and not be dropped, yet suspend calculations for metrics, then it can provide start() and stop() methods when being implemented as a class instance. import os import time import multiprocessing @newrelic.agent.data_source_factory(name='CPU Usage') class CPUMetricsDataSource(object): def __init__(self, settings, environ): self.last_timestamp = None self.times = None def start(self): self.last_timestamp = time.time() self.times = os.times() def stop(self): self.last_timestamp = None self.times = None def __call__(self): if self.times is None: return now = time.time() new_times = os.times() elapsed_time = now - self.last_timestamp user_time = new_times[0] - self.times[0] utilization = user_time / (elapsed_time*multiprocessing.cpu_count()) self.last_timestamp = now self.times = new_times yield ('CPU/User Time', user_time) yield ('CPU/User/Utilization', utilization) newrelic.agent.register_data_source(CPUMetricsDataSource) Copy With the start() and stop() methods defined, the instance of the data source will not be destroyed at the termination of the agent run but kept around. The agent at this point is then expecting that the data source will itself deal with the suspension of any aggregation of metrics, dropping any accumulated metrics and ensure that when the agent reregisters the application with the data collector and calls start() again, only then would tracking for metrics be resumed. Configuring a data source Data sources may not always be bound to one specific information source. It may be necessary to register a data source against different underlying information sources from which metrics are generated. In this case distinct settings can be passed when registering a data source using the register_data_source() function. When using a data factory, these settings will then be available when the data source is being initialized. 
@newrelic.agent.data_source_factory() class HostMonitorDataSource(object): def __init__(self, settings, environ): self.hostname = settings['hostname'] def __call__(self): ... newrelic.agent.register_data_source(HostMonitorDataSource, name='Host Monitor (host-1)', settings=dict(hostname='host-1')) newrelic.agent.register_data_source(HostMonitorDataSource, name='Host Monitor (host-2)', settings=dict(hostname='host-2')) Copy If provision of settings is optional, the data source should only attempt to access settings if the settings option is not None. Even if supplied a dictionary, it should also cope with missing settings in the dictionary. Setup from configuration file Although the examples here showed the use of the register_data_source() API call, this would not be the normal way by which data sources would be registered. This is not the preferred way as it would require modifications to the application to import the module for the data source and register it. Instead, the primary way for defining and integrating data sources into an existing monitored web application would be to list them in the agent configuration file. This entails adding an additional section in the agent configuration file for each data source with prefix data-source:: [data-source:process-info] enabled = true function = samplers.process_info:process_info_data_source Copy If registering a data source from the agent configuration file, there should be no separate registration for the same data source being performed using the register_data_source() function occuring in your application code or in the module defining the data source. If there is, then two instances of the data source would end up being registerd. If needing to provide specific settings for a data source, this can be done by creating a separate section in the agent configuration file and referring to the section name in the settings value in the data source configuration. [data-source:host-monitor] enabled = true function = samplers.process_info:process_info_data_source name = Host Monitor (host-1) settings = host-monitor:host-1 [host-monitor:host-1] hostname = host-1 Copy As data source settings supplied via the configuration file will always be passed as string values, it is recommended that even when using register_data_source() with application code to register a data source and provide settings explicitly, that strings be used for setting values. The data source should then deal with the conversion to a different type such as a numeric value or list of values.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.99595, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "tags": "Supported features", - "body": " transaction, such as in a background thread (which isn't being tracked as a background task), then the call does nothing and the data is discarded. 
In order to be able to record custom metrics in such a situation, it is necessary to supply the application object corresponding to the application against" - }, - "id": "617dbf9f196a67adbbf7dfa4" } ], "/postresql/18ee5d2e-f5dd-4e0a-9616-af9deaa12cb2": [ @@ -58356,7 +58311,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59566, + "_score": 102.699814, "_version": null, "_explanation": null, "sort": null, @@ -58398,7 +58353,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75413, + "_score": 101.91943, "_version": null, "_explanation": null, "sort": null, @@ -58440,7 +58395,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7467, + "_score": 101.913666, "_version": null, "_explanation": null, "sort": null, @@ -58482,7 +58437,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93249, + "_score": 98.52904, "_version": null, "_explanation": null, "sort": null, @@ -58525,7 +58480,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.185425, + "_score": 95.344635, "_version": null, "_explanation": null, "sort": null, @@ -58537,54 +58492,6 @@ } ], "/kentik-firehose-pack/a781c760-fd25-458f-a170-dae8c722b2ea": [ - { - "sections": [ - "ktranslate Docker container health monitoring", - "Logs from ktranslate", - "Tip", - "Common log searches", - "What version of ktranslate am I running?", - "What arguments were passed to Docker at runtime?", - "What errors am I experiencing?", - "Is my match_attributes filter working on my device?", - "Metrics from ktranslate", - "Common metrics searches", - "What are the current versions of my ktranslate applications?", - "What is the health of my ktranslate application?", - "What is the health of my deliveries to New Relic One?", - "What is the health of my SNMP collection overall?", - "What devices are failing SNMP collection?", - "What is the health of my flow data collection?", - "What is the health of my syslog collection?" - ], - "title": "ktranslate Docker container health monitoring", - "type": "docs", - "tags": [ - "Network performance monitoring", - "Integrations" - ], - "external_id": "a6a4a6194c420409a30f75d437e27998235b84bc", - "image": "", - "url": "https://docs.newrelic.com/docs/network-performance-monitoring/advanced/ktranslate-container-health/", - "published_at": "2022-02-14T02:48:38Z", - "updated_at": "2022-02-14T02:48:38Z", - "document_type": "page", - "popularity": 1, - "body": "While running the ktranslate Docker container for New Relic network performance monitoring, you can monitor the health of the container to proactively detect potential issues. The ktranslate container image has the -tee_logs=true and -metrics=jchf settings available during runtime, which allow it to send health metrics into New Relic One directly. These are enabled by default when installing network performance monitoring via the New Relic One guided install. We recommend you to set them up when installing network performance monitoring manually. Logs from ktranslate Tip If you want to check the logs locally from the Docker host, run docker logs $CONTAINER_NAME. For example, docker logs ktranslate-snmp. The -tee_logs=true option sends logs to New Relic One when polling devices. To see them, do the following: Go to one.newrelic.com > Logs. 
In Find logs where, enter collector.name:\"ktranslate\" and click Query logs. Common log searches Below are some common searches that can be used during troubleshooting to gather data for support: What version of ktranslate am I running? Logs UI: bash Copy $ collector.name:\"ktranslate\" message:\"*KTranslate Running -- Version*\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` LIKE '%KTranslate Running -- Version%' Copy Expected Results: bash Copy $ KTranslate Running -- Version kt-2021-12-06-1546870234; Build Mon Dec 6 22:22:56 UTC 2021 What arguments were passed to Docker at runtime? Logs UI: bash Copy $ collector.name:\"ktranslate\" message:\"*KTranslate CLI:*\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` LIKE '%KTranslate CLI:%' Copy Expected Results: bash Copy $ KTranslate CLI: [ktranslate -listen off -mapping /etc/ktranslate/config.json -geo /etc/ktranslate/GeoLite2-Country.mmdb -udrs /etc/ktranslate/udr.csv -api_devices /etc/ktranslate/devices.json -asn /etc/ktranslate/GeoLite2-ASN.mmdb -log_level info -snmp /snmp-base.yaml -nr_account_id=2583772 -log_level=info -metrics=jchf -tee_logs=true -service_name=snmp nr1.snmp] What errors am I experiencing? Without a parsing rule applied to your logs Logs UI: bash Copy $ collector.name:\"ktranslate\" message:-*\\[Info\\]* NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` NOT LIKE '%[Info]%' Copy With a parsing rule applied to your logs Logs UI: bash Copy $ collector.name:\"ktranslate\" severity:-\"Info\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `severity` != 'Info' Copy Expected Results: bash Copy $ KTranslate>cisco-7513 There was an SNMP polling error with the CustomDeviceMetrics walking OID .1.3.6.1.2.1.4.31.1.1.21 after 0 retries: request timeout (after 0 retries). Is my match_attributes filter working on my device? Logs UI: bash Copy $ collector.name:\"ktranslate\" message:\"*Match Attribute*\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` LIKE '%Match Attribute%' Copy Expected Results: bash Copy $ KTranslate>cisco-7513 Added 1 Match Attribute(s) All devices are expected to have at least 1 Match Attribute inherited from the default monitor_admin_shut: true configuration. You should expect a value of 2 to be shown for a device that you have added a single match attribute to. Tip You can further filter these results by adding the device name to your query: collector.name:\"ktranslate\" message:\"*$DEVICE_NAME*Match Attribute*\". Metrics from ktranslate The -metrics option captures the following performance metrics when polling devices: Metric Granularity Description baseserver_healthcheck_execution_total Top Level Rate of internal health checks. Shows mostly that things are not deadlocked and should always be greater than 0. inputq Top Level Messages per second (msg/sec) received over the last 60 seconds from all SNMP, Flow, and VPC inputs combined. jchfq Top Level Gauge rate with number of available pre-allocated buffers. It should be about 8,000. delivery_metrics_nr Delivery to New Relic One Batches per second (batches/sec) sent over the last 60 seconds for all metrics to New Relic One. delivery_logs_nr Delivery to New Relic One Logs per second (logs/sec) sent over the last 60 seconds for all logs to New Relic One. delivery_wins_nr Delivery to New Relic One Wins per second (wins/sec) of 200 HTTP codes received over the last 60 seconds from sending metrics and events to New Relic One. 
device_metrics SNMP Polls per second (polls/sec) of SNMP polling over the last 60 seconds for device level metrics. interface_metrics SNMP Polls per second (polls/sec) of SNMP polling over the last 60 seconds for interface level metrics. snmp_fail SNMP Gauge to monitor if SNMP polling is working faceted by device_name. Where 1 means good and 2 means fail. netflow.flows Netflow Flows per second (fps) received over the last 60 seconds for all device flow data: IPFIX, NetFlow, or sFlow. syslog_queue Syslog Gauge of syslog messages waiting to be processed. syslog_errors Syslog Errors per second (errors/sec) over the last 60 seconds while processing syslog messages. syslog_messages Syslog Messages per second (msg/sec) received over the last 60 seconds for all syslog data. Common metrics searches To see these metrics in New Relic One: Go to one.newrelic.com and click Query your data. Enter one of the following NRQL queries: What are the current versions of my ktranslate applications? FROM Metric SELECT latest(ver) AS 'image_version' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my ktranslate application? FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.baseserver_healthcheck_execution_total) AS 'healthcheck_total', latest(kentik.ktranslate.chf.kkc.inputq) AS 'input_per_second', latest(kentik.ktranslate.chf.kkc.jchfq) AS 'buffer' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my deliveries to New Relic One? FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.delivery_metrics_nr) AS 'delivery_metric_batches_per_second', latest(kentik.ktranslate.chf.kkc.delivery_logs_nr) AS 'delivery_logs_per_second', latest(kentik.ktranslate.chf.kkc.delivery_wins_nr) AS 'delivery_wins_per_second' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my SNMP collection overall? FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.device_metrics) AS 'device_polls_per_second', latest(kentik.ktranslate.chf.kkc.interface_metrics) AS 'interface_polls_per_second' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What devices are failing SNMP collection? SELECT max(snmp_fail) FROM ( FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.snmp_fail) AS 'snmp_fail' FACET host AS 'docker_host', svc AS 'container_service', device_name AS 'snmp_device' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' ) FACET docker_host, container_service, snmp_device WHERE snmp_fail = 2 Copy What is the health of my flow data collection? FROM Metric SELECT max(kentik.ktranslate.chf.kkc.netflow) AS 'flows_per_second' FACET host AS 'docker_host', svc AS 'container_service', device_name AS 'flow_device' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my syslog collection? 
FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.syslog_queue) AS 'syslog_queue_total', latest(kentik.ktranslate.chf.kkc.syslog_errors) AS 'syslog_errors_per_second', latest(kentik.ktranslate.chf.kkc.syslog_messages) AS 'syslog_messages_per_second' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 906.2646, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "sections": "Common log searches", - "tags": "Network performance monitoring", - "body": " seconds for all device flow data: IPFIX, NetFlow, or sFlow. syslog_queue Syslog Gauge of syslog messages waiting to be processed. syslog_errors Syslog Errors per second (errors/sec) over the last 60 seconds while processing syslog messages. syslog_messages Syslog Messages per second (msg/sec) received" - }, - "id": "61b9389528ccbcb4d396ee5e" - }, { "sections": [ "Set up network flow data monitoring", @@ -58621,7 +58528,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 876.329, + "_score": 871.927, "_version": null, "_explanation": null, "sort": null, @@ -58633,6 +58540,54 @@ }, "id": "612724e128ccbc4ac9f2612a" }, + { + "sections": [ + "ktranslate Docker container health monitoring", + "Logs from ktranslate", + "Tip", + "Common log searches", + "What version of ktranslate am I running?", + "What arguments were passed to Docker at runtime?", + "What errors am I experiencing?", + "Is my match_attributes filter working on my device?", + "Metrics from ktranslate", + "Common metrics searches", + "What are the current versions of my ktranslate applications?", + "What is the health of my ktranslate application?", + "What is the health of my deliveries to New Relic One?", + "What is the health of my SNMP collection overall?", + "What devices are failing SNMP collection?", + "What is the health of my flow data collection?", + "What is the health of my syslog collection?" + ], + "title": "ktranslate Docker container health monitoring", + "type": "docs", + "tags": [ + "Network performance monitoring", + "Integrations" + ], + "external_id": "a6a4a6194c420409a30f75d437e27998235b84bc", + "image": "", + "url": "https://docs.newrelic.com/docs/network-performance-monitoring/advanced/ktranslate-container-health/", + "published_at": "2022-02-14T02:48:38Z", + "updated_at": "2022-02-14T02:48:38Z", + "document_type": "page", + "popularity": 1, + "body": "While running the ktranslate Docker container for New Relic network performance monitoring, you can monitor the health of the container to proactively detect potential issues. The ktranslate container image has the -tee_logs=true and -metrics=jchf settings available during runtime, which allow it to send health metrics into New Relic One directly. These are enabled by default when installing network performance monitoring via the New Relic One guided install. We recommend you to set them up when installing network performance monitoring manually. Logs from ktranslate Tip If you want to check the logs locally from the Docker host, run docker logs $CONTAINER_NAME. For example, docker logs ktranslate-snmp. The -tee_logs=true option sends logs to New Relic One when polling devices. To see them, do the following: Go to one.newrelic.com > Logs. In Find logs where, enter collector.name:\"ktranslate\" and click Query logs. 
Common log searches Below are some common searches that can be used during troubleshooting to gather data for support: What version of ktranslate am I running? Logs UI: bash Copy $ collector.name:\"ktranslate\" message:\"*KTranslate Running -- Version*\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` LIKE '%KTranslate Running -- Version%' Copy Expected Results: bash Copy $ KTranslate Running -- Version kt-2021-12-06-1546870234; Build Mon Dec 6 22:22:56 UTC 2021 What arguments were passed to Docker at runtime? Logs UI: bash Copy $ collector.name:\"ktranslate\" message:\"*KTranslate CLI:*\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` LIKE '%KTranslate CLI:%' Copy Expected Results: bash Copy $ KTranslate CLI: [ktranslate -listen off -mapping /etc/ktranslate/config.json -geo /etc/ktranslate/GeoLite2-Country.mmdb -udrs /etc/ktranslate/udr.csv -api_devices /etc/ktranslate/devices.json -asn /etc/ktranslate/GeoLite2-ASN.mmdb -log_level info -snmp /snmp-base.yaml -nr_account_id=2583772 -log_level=info -metrics=jchf -tee_logs=true -service_name=snmp nr1.snmp] What errors am I experiencing? Without a parsing rule applied to your logs Logs UI: bash Copy $ collector.name:\"ktranslate\" message:-*\\[Info\\]* NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` NOT LIKE '%[Info]%' Copy With a parsing rule applied to your logs Logs UI: bash Copy $ collector.name:\"ktranslate\" severity:-\"Info\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `severity` != 'Info' Copy Expected Results: bash Copy $ KTranslate>cisco-7513 There was an SNMP polling error with the CustomDeviceMetrics walking OID .1.3.6.1.2.1.4.31.1.1.21 after 0 retries: request timeout (after 0 retries). Is my match_attributes filter working on my device? Logs UI: bash Copy $ collector.name:\"ktranslate\" message:\"*Match Attribute*\" NRQL: FROM Log SELECT * WHERE `collector.name` = 'ktranslate' AND `message` LIKE '%Match Attribute%' Copy Expected Results: bash Copy $ KTranslate>cisco-7513 Added 1 Match Attribute(s) All devices are expected to have at least 1 Match Attribute inherited from the default monitor_admin_shut: true configuration. You should expect a value of 2 to be shown for a device that you have added a single match attribute to. Tip You can further filter these results by adding the device name to your query: collector.name:\"ktranslate\" message:\"*$DEVICE_NAME*Match Attribute*\". Metrics from ktranslate The -metrics option captures the following performance metrics when polling devices: Metric Granularity Description baseserver_healthcheck_execution_total Top Level Rate of internal health checks. Shows mostly that things are not deadlocked and should always be greater than 0. inputq Top Level Messages per second (msg/sec) received over the last 60 seconds from all SNMP, Flow, and VPC inputs combined. jchfq Top Level Gauge rate with number of available pre-allocated buffers. It should be about 8,000. delivery_metrics_nr Delivery to New Relic One Batches per second (batches/sec) sent over the last 60 seconds for all metrics to New Relic One. delivery_logs_nr Delivery to New Relic One Logs per second (logs/sec) sent over the last 60 seconds for all logs to New Relic One. delivery_wins_nr Delivery to New Relic One Wins per second (wins/sec) of 200 HTTP codes received over the last 60 seconds from sending metrics and events to New Relic One. 
device_metrics SNMP Polls per second (polls/sec) of SNMP polling over the last 60 seconds for device level metrics. interface_metrics SNMP Polls per second (polls/sec) of SNMP polling over the last 60 seconds for interface level metrics. snmp_fail SNMP Gauge to monitor if SNMP polling is working faceted by device_name. Where 1 means good and 2 means fail. netflow.flows Netflow Flows per second (fps) received over the last 60 seconds for all device flow data: IPFIX, NetFlow, or sFlow. syslog_queue Syslog Gauge of syslog messages waiting to be processed. syslog_errors Syslog Errors per second (errors/sec) over the last 60 seconds while processing syslog messages. syslog_messages Syslog Messages per second (msg/sec) received over the last 60 seconds for all syslog data. Common metrics searches To see these metrics in New Relic One: Go to one.newrelic.com and click Query your data. Enter one of the following NRQL queries: What are the current versions of my ktranslate applications? FROM Metric SELECT latest(ver) AS 'image_version' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my ktranslate application? FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.baseserver_healthcheck_execution_total) AS 'healthcheck_total', latest(kentik.ktranslate.chf.kkc.inputq) AS 'input_per_second', latest(kentik.ktranslate.chf.kkc.jchfq) AS 'buffer' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my deliveries to New Relic One? FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.delivery_metrics_nr) AS 'delivery_metric_batches_per_second', latest(kentik.ktranslate.chf.kkc.delivery_logs_nr) AS 'delivery_logs_per_second', latest(kentik.ktranslate.chf.kkc.delivery_wins_nr) AS 'delivery_wins_per_second' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my SNMP collection overall? FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.device_metrics) AS 'device_polls_per_second', latest(kentik.ktranslate.chf.kkc.interface_metrics) AS 'interface_polls_per_second' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What devices are failing SNMP collection? SELECT max(snmp_fail) FROM ( FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.snmp_fail) AS 'snmp_fail' FACET host AS 'docker_host', svc AS 'container_service', device_name AS 'snmp_device' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' ) FACET docker_host, container_service, snmp_device WHERE snmp_fail = 2 Copy What is the health of my flow data collection? FROM Metric SELECT max(kentik.ktranslate.chf.kkc.netflow) AS 'flows_per_second' FACET host AS 'docker_host', svc AS 'container_service', device_name AS 'flow_device' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy What is the health of my syslog collection? 
FROM Metric SELECT latest(kentik.ktranslate.chf.kkc.syslog_queue) AS 'syslog_queue_total', latest(kentik.ktranslate.chf.kkc.syslog_errors) AS 'syslog_errors_per_second', latest(kentik.ktranslate.chf.kkc.syslog_messages) AS 'syslog_messages_per_second' FACET host AS 'docker_host', svc AS 'container_service' WHERE provider = 'kentik-agent' AND instrumentation.name = 'heartbeat' Copy", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 858.88196, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Common log searches", + "tags": "Network performance monitoring", + "body": " seconds for all device flow data: IPFIX, NetFlow, or sFlow. syslog_queue Syslog Gauge of syslog messages waiting to be processed. syslog_errors Syslog Errors per second (errors/sec) over the last 60 seconds while processing syslog messages. syslog_messages Syslog Messages per second (msg/sec) received" + }, + "id": "61b9389528ccbcb4d396ee5e" + }, { "sections": [ "Managing the 'ktranslate' Docker container", @@ -58661,7 +58616,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 418.2242, + "_score": 417.47586, "_version": null, "_explanation": null, "sort": null, @@ -58713,7 +58668,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 402.77734, + "_score": 381.71677, "_version": null, "_explanation": null, "sort": null, @@ -58762,7 +58717,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 240.59602, + "_score": 239.04964, "_version": null, "_explanation": null, "sort": null, @@ -58822,7 +58777,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 263.91895, + "_score": 261.83475, "_version": null, "_explanation": null, "sort": null, @@ -58869,7 +58824,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.9105, + "_score": 66.87514, "_version": null, "_explanation": null, "sort": null, @@ -58914,7 +58869,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.50365, + "_score": 64.939476, "_version": null, "_explanation": null, "sort": null, @@ -58980,7 +58935,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 49.778797, + "_score": 49.690002, "_version": null, "_explanation": null, "sort": null, @@ -59030,7 +58985,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 482.37805, + "_score": 456.6502, "_version": null, "_explanation": null, "sort": null, @@ -59084,7 +59039,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 359.55383, + "_score": 340.71954, "_version": null, "_explanation": null, "sort": null, @@ -59133,7 +59088,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 302.31683, + "_score": 300.78708, "_version": null, "_explanation": null, "sort": null, @@ -59177,7 +59132,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 299.10724, + "_score": 297.58392, "_version": null, "_explanation": null, "sort": null, @@ -59225,7 +59180,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 295.4348, + "_score": 293.94257, 
"_version": null, "_explanation": null, "sort": null, @@ -59264,7 +59219,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 367.2366, + "_score": 363.34937, "_version": null, "_explanation": null, "sort": null, @@ -59302,7 +59257,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 292.1078, + "_score": 287.9966, "_version": null, "_explanation": null, "sort": null, @@ -59337,7 +59292,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 279.46887, + "_score": 275.3806, "_version": null, "_explanation": null, "sort": null, @@ -59376,7 +59331,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 273.3993, + "_score": 270.6005, "_version": null, "_explanation": null, "sort": null, @@ -59417,7 +59372,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 251.47577, + "_score": 248.57774, "_version": null, "_explanation": null, "sort": null, @@ -59456,7 +59411,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 337.97803, + "_score": 334.5535, "_version": null, "_explanation": null, "sort": null, @@ -59492,7 +59447,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 246.11324, + "_score": 243.88467, "_version": null, "_explanation": null, "sort": null, @@ -59530,7 +59485,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 226.50583, + "_score": 223.72122, "_version": null, "_explanation": null, "sort": null, @@ -59565,7 +59520,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 190.09192, + "_score": 187.7861, "_version": null, "_explanation": null, "sort": null, @@ -59606,7 +59561,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 174.43962, + "_score": 172.83025, "_version": null, "_explanation": null, "sort": null, @@ -59651,7 +59606,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -59693,7 +59648,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -59735,7 +59690,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -59777,7 +59732,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -59816,7 +59771,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -59855,7 +59810,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -59920,7 +59875,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.09776, + 
"_score": 111.34482, "_version": null, "_explanation": null, "sort": null, @@ -59949,7 +59904,7 @@ "external_id": "1f13326e09d6da78f08f645bc069c22342fbac6c", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/config-management-tools/configure-infrastructure-agent-using-ansible/", - "published_at": "2022-02-14T09:31:37Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-04T11:22:48Z", "document_type": "page", "popularity": 1, @@ -59957,7 +59912,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.761856, + "_score": 107.752815, "_version": null, "_explanation": null, "sort": null, @@ -59996,7 +59951,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 106.56532, + "_score": 99.14235, "_version": null, "_explanation": null, "sort": null, @@ -60008,45 +59963,38 @@ }, { "sections": [ - "Amazon RDS Enhanced Monitoring integration", - "Important", - "Features", - "Enable enhanced monitoring", - "Stream logs to Lambda function", - "Configuration and polling", - "Find and use data", - "Metric data", - "Metric data for all DB engines (except MS SQL Server) [#DB metrics]", - "Metric data for MS SQL", - "Definitions" + "OS versions page", + "Viewing the OS versions page", + "Viewing drill-down details" ], - "title": "Amazon RDS Enhanced Monitoring integration", + "title": "OS versions page", "type": "docs", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "Mobile app pages", + "Mobile monitoring UI", + "Mobile monitoring" ], - "external_id": "b8fc293ef2181c19a5e816449b9a320e44e13ab3", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-rds-enhanced-monitoring-integration/", - "published_at": "2022-02-15T20:48:24Z", - "updated_at": "2022-02-15T20:48:24Z", + "external_id": "370b6f1584d001a17f414066097692b9189e1a50", + "image": "https://docs.newrelic.com/static/8d84abf966c2f4b75ca298b362995c0e/c1b63/os-version-pic_0.png", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/mobile-monitoring-ui/mobile-app-pages/os-versions-page/", + "published_at": "2022-02-14T10:34:40Z", + "updated_at": "2021-07-09T11:46:41Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for collecting enhanced Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor the DB processes and identify potential trouble spots as well as to profile the DB allowing you to improve and optimize their response and cost. Enable enhanced monitoring Important Enabling this integration will incur some additional charges to your Amazon CloudWatch account. In addition there are some limitations and CPU metric data collection differences that are explained in Amazon's enhanced monitoring documentation. You must first have the New Relic Amazon RDS monitoring integration enabled before enabling RDS Enhanced Monitoring. 
Be sure that you have completed the steps in Connect AWS services to New Relic. New Relic uses AWS Lambda to collect RDS logs and provide near-real-time data about your RDS instances; this capability is called RDS Enhanced Monitoring in AWS. Follow these steps to enable the RDS Enhanced Monitoring integration: Specify the RDS instances that need RDS Enhanced Monitoring enabled. You can specify this when creating or modifying the instance: under Monitoring, set Enable Enhanced Monitoring to Yes. We recommend setting the data Granularity to 15 seconds. After enhanced monitoring is enabled, a stream called RDSOSMetrics is created in AWS CloudWatch Logs. Enhanced monitoring metrics are available via this stream. Create a Lambda function and subscribe it to that stream in the following steps to obtain the data. Create a new AWS Lambda function from the Serverless Repository: Go to Lambda > Create Function > Browse serverless App repository, check the box for Show apps that create custom IAM roles or resource policies, and search for NewRelic-log-ingestion. Populate the LICENSE_KEY environment variable with your New Relic account license key. Select Deploy to create a new CloudFormation stack, a new function called newrelic-log-ingestion, and the required role. Make sure that the NewRelic-log-ingestion function execution role has attached the arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess policy, giving it the appropriate permissions to read CloudWatch Logs. Go to the newrelic-log-ingestion function. Continue with the procedure to stream logs to the Lambda function. Stream logs to Lambda function To link the RDSOSMetrics log stream to the Lambda function (JSON format): From AWS Console > CloudWatch > Logs, select RDSOSMetrics log group, and apply Actions > Create Lambda subscription filter. For the Lambda function, select newrelic-log-ingestion. From the Log Format dropdown, select JSON as the Log format. At the bottom, click the Start streaming button to save your Lambda subscription filter. Once completed, the Lambda function will send all the log lines from RDSOSMetrics to New Relic's ingest services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon RDS Enhanced Monitoring integration: New Relic polling interval: 30 seconds on average (collected via CloudWatch Logs) Configurable when setting up AWS Lambda Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the RDS > Enhanced monitoring dashboard link. You can query and explore your data using the DatastoreSample event type, with a provider value of RdsDbInstance. For more on how to use your data, see Understand and use integration data. Metric data New Relic collects the following enhanced RDS data: Metric data for all DB engines (except MS SQL Server) [#DB metrics] Group Metrics Description General engine The database engine for the DB instance. instanceId The DB instance identifier. instanceResourceId A region-unique, immutable identifier for the DB instance, also used as the log stream identifier. numVCpus The number of virtual CPUs for the DB instance. timestamp The time at which the metrics were taken. uptime The amount of time that the DB instance has been active. version The version of the OS metrics' stream JSON format. cpuUtilization guest The percentage of CPU in use by guest programs.
idle The percentage of CPU that is idle. irq The percentage of CPU in use by software interrupts. nice The percentage of CPU in use by programs running at lowest priority. steal The percentage of CPU in use by other virtual machines. system The percentage of CPU in use by the kernel. total The total percentage of the CPU in use. This value excludes the nice value. user The percentage of CPU in use by user programs. wait The percentage of CPU unused while waiting for I/O access. diskIO (not available for Amazon Aurora) avgQueueLen The number of requests waiting in the I/O device's queue. avgReqSz The average request size, in kilobytes. await The number of milliseconds required to respond to requests, including queue time and service time. device The identifier of the disk device in use. readIOsPS The number of read operations per second. readKb The total number of kilobytes read. readKbPS The number of kilobytes read per second. rrqmPS The number of merged read requests queued per second. tps The number of I/O transactions per second. util The percentage of CPU time during which requests were issued. writeIOsPS The number of write operations per second. writeKb The total number of kilobytes written. writeKbPS The number of kilobytes written per second. wrqmPS The number of merged write requests queued per second. fileSys maxFiles The maximum number of files that can be created for the file system. total The total amount of disk space available for the file system, in kilobytes. used The amount of disk space used by files in the file system, in kilobytes. usedFilePercent The percentage of available files in use. usedFiles The number of files in the file system. usedPercent The percentage of the file-system disk space in use. loadAverageMinute fifteen The number of processes requesting CPU time over the last 15 minutes. five The number of processes requesting CPU time over the last 5 minutes. one The number of processes requesting CPU time over the last minute. memory active The amount of assigned memory, in kilobytes. buffers The amount of memory used for buffering I/O requests prior to writing to the storage device, in kilobytes. cached The amount of memory used for caching file system–based I/O. dirty The amount of memory pages in RAM that have been modified but not written to their related data block in storage, in kilobytes. free The amount of unassigned memory, in kilobytes. hugePagesFree The number of free huge pages. Huge pages are a feature of the Linux kernel. hugePagesRsvd The number of committed huge pages. hugePagesSize The size for each huge pages unit, in kilobytes. hugePagesSurp The number of available surplus huge pages over the total. hugePagesTotal The total number of huge pages for the system. inactive The amount of least-frequently used memory pages, in kilobytes. mapped The total amount of file-system contents that is memory mapped inside a process address space, in kilobytes. pageTables The amount of memory used by page tables, in kilobytes. slab The amount of reusable kernel data structures, in kilobytes. total The total amount of memory, in kilobytes. writeback The amount of dirty pages in RAM that are still being written to the backing storage, in kilobytes. network rx The number of bytes received per second. tx The number of bytes uploaded per second. process cpuUsedPc The percentage of CPU used by the process. rss The amount of RAM allocated to the process, in kilobytes. memoryUsedPc The percentage of memory in use by the process. processName The name of the process.
swap cached The amount of swap memory, in kilobytes, used as cache memory. free The total amount of swap memory free, in kilobytes. total The total amount of swap memory available, in kilobytes. tasks blocked The number of tasks that are blocked. running The number of tasks that are running. sleeping The number of tasks that are sleeping. stopped The number of tasks that are stopped. total The total number of tasks. zombie The number of child tasks that are inactive with an active parent task. Metric data for MS SQL Group Metrics Description disks totalKb The total space of the disk, in kilobytes. usedKb The amount of space used on the disk, in kilobytes. usedPc The percentage of space used on the disk. availKb The space available on the disk, in kilobytes. availPc The percentage of space available on the disk. rdCountPS The number of read operations per second. rdBytesPS The number of bytes read per second. wrCountPS The number of write operations per second. wrBytesPS The number of bytes written per second. memory commitTotKb The amount of pagefile-backed virtual address space in use, that is, the current commit charge. This value is composed of main memory (RAM) and disk (pagefiles). commitLimitKb The maximum possible value for the commitTotKb metric. This value is the sum of the current pagefile size plus the physical memory available for pageable contents–excluding RAM that is assigned to non-pageable areas. commitPeakKb The largest value of the commitTotKb metric since the operating system was last started. kernTotKb The sum of the memory in the paged and non-paged kernel pools, in kilobytes. kernPagedKb The amount of memory in the paged kernel pool, in kilobytes. kernNonpagedKb The amount of memory in the non-paged kernel pool, in kilobytes. pageSize The size of a page, in bytes. physTotKb The amount of physical memory, in kilobytes. physAvailKb The amount of available physical memory, in kilobytes. sqlServerTotKb The amount of memory committed to Microsoft SQL Server, in kilobytes. sysCacheKb The amount of system cache memory, in kilobytes. network rdBytesPS The number of bytes received per second. wrBytesPS The number of bytes sent per second. process cpuUsedPc The percentage of CPU used by the process. memUsedPc The percentage of memory in use by the process. processName The name of the process. workingSetKb The amount of memory in the private working set plus the amount of memory that is in use by the process and can be shared with other processes, in kilobytes. workingSetPrivKb The amount of memory that is in use by a process, but can't be shared with other processes, in kilobytes. workingSetShareableKb The amount of memory that is in use by a process and can be shared with other processes, in kilobytes. virtKb The amount of virtual address space the process is using, in kilobytes. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. system handles The number of handles that the system is using. processes The number of processes running on the system. threads The number of threads running on the system. Definitions Term Description Event type DataStoreSample Provider RdsDbInstance Processes Enhanced Monitoring allows you to monitor the following processes associated with your RDS instances: RDS Process: Shows a summary of the resources used by the RDS management agent, diagnostics monitoring processes, and other AWS processes that are required to support RDS DB instances.
RDS Child Process: Nested under RDS Processes, shows a summary of the RDS processes that support the DB instance, for example aurora for Amazon Aurora DB clusters and mysqld for MySQL DB instances. OS Processes: Shows a summary of the kernel and system processes, which generally have minimal impact on performance.", "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill down into details by a major or minor OS version (for example, iOS 8, iOS 7.1.1, Android 4.2.2). Viewing the OS versions page one.newrelic.com > Mobile > (select an app) > App > OS versions: Use this page to view, sort, or drill down into detailed information about the top five types of operating system versions using your mobile app. To view performance details about the operating system versions for your mobile app users: Go to one.newrelic.com > Mobile > (select an app) > App > OS versions. To select the mobile app versions or time period, use the Versions menu and time picker below the UI menu bar. Optional: Select the Sort by and Hide < 1% throughput options. To expand or collapse the list of operating systems to include versions, select the operating system's name (for example, iOS 7). Viewing drill-down details To drill down into detailed information, use any of our standard user interface functions and page functions. In addition: To view details for the minor and point releases of a major OS version (including interaction time, HTTP request times, network failures, active devices, and slowest traces or all subversions), select a major OS version from the list. To view details for a specific OS version, select its name from the expanded OS list. To view trace details for a slow transaction (if available), select its link. For more information, see Interactions page. To return to the main OS versions page, select the Close (X) button.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.135895, + "_score": 95.48022, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "OS versions page", "sections": "OS versions page", "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android.
Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill" }, - "id": "617d6d5d64441fb952fbcb5d" + "id": "603eaee9e7b9d260112a0809" } ], "/amazon-eks-on-aws-fargate/066eb2a4-65a4-456d-a5ac-0e1fcc2740a2": [ @@ -60091,7 +60039,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 246.64851, + "_score": 229.29538, "_version": null, "_explanation": null, "sort": null, @@ -60136,7 +60084,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 227.97119, + "_score": 223.34863, "_version": null, "_explanation": null, "sort": null, @@ -60177,7 +60125,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 223.50299, + "_score": 219.00745, "_version": null, "_explanation": null, "sort": null, @@ -60233,7 +60181,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 223.22153, + "_score": 218.79266, "_version": null, "_explanation": null, "sort": null, @@ -60249,18 +60197,24 @@ "Link your applications to Kubernetes", "Tip", "Compatibility and requirements", - "Kubernetes requirements", "Network requirements", "APM agent compatibility", "Openshift requirements", "Important", - "Configure the injection of metadata", - "Default configuration", + "Set up the injection of metadata", "Custom configuration", + "Limit namespaces subject to injection", + "Use cert-manager to generate certs", "Manage custom certificates", "Validate the injection of metadata", "Disable the injection of metadata", - "Troubleshooting" + "Troubleshooting", + "No Kubernetes metadata in APM or distributed tracing transactions", + "Problem", + "Solution", + "The CA that is used to patch the `mutatingwebhookconfiguration` resource is not the one used when signing the certificates", + "Workaround", + "No Kubernetes metadata in the transactions' attributes" ], "title": "Link your applications to Kubernetes", "type": "docs", @@ -60269,18 +60223,18 @@ "Kubernetes integration", "Integrations" ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", + "external_id": "2c91d68c28f1185f644320c1416aed347c8645ad", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/link-your-applications-kubernetes/", + "published_at": "2022-02-15T19:14:51Z", + "updated_at": "2022-02-04T14:47:57Z", "document_type": "page", "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. 
The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. 
To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled. Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \ --dry-run \ --debug \ --namespace newrelic --create-namespace \ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \ --set global.cluster=K8S_CLUSTER_NAME \ --set ksm.enabled=true \ --set newrelic-infrastructure.privileged=true \ --set infrastructure.enabled=true \ --set prometheus.enabled=true \ --set webhook.enabled=true \ --set kubeEvents.enabled=true \ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \ --dry-run \ --debug \ --namespace newrelic --create-namespace \ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \ --set global.cluster=K8S_CLUSTER_NAME \ --set ksm.enabled=true \ --set newrelic-infrastructure.privileged=true \ --set infrastructure.enabled=true \ --set prometheus.enabled=true \ --set webhook.enabled=true \ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \ --set kubeEvents.enabled=true \ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates, you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation of certificates, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \ --dry-run \ --debug \ --namespace newrelic --create-namespace \ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \ --set global.cluster=K8S_CLUSTER_NAME \ --set ksm.enabled=true \ --set newrelic-infrastructure.privileged=true \ --set infrastructure.enabled=true \ --set prometheus.enabled=true \ --set webhook.enabled=true \ --set nri-metadata-injection.customTLSCertificate=true \ --set kubeEvents.enabled=true \ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format.
If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl x509 -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help. Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \ --key=PEM_ENCODED_SERVER_KEY \ --cert=PEM_ENCODED_CERTIFICATE \ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p "[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to run the job again using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used in order to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object.
The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step. If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries on the logs, it means that the apiserver is not being able to communicate with the webhook service, this could be due to networking rules or security groups rejecting the communication. 
To check whether the apiserver is unable to communicate with the webhook, inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by executing the following command in a terminal window, and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster; this makes the apiserver try to communicate with the webhook. The following command creates a busybox container. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors. grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", + "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. Additionally, you can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie, which is currently a beta release. This Pixie integration into New Relic does not require a language agent. Learn more about Auto-telemetry with Pixie here. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data. Compatibility and requirements To link your applications and Kubernetes, you must be able to deploy MutatingWebhookConfigurations to your Kubernetes cluster. 
To verify that you have the required permissions, you can run the following command: bash Copy $ kubectl auth can-i create mutatingwebhookconfigurations.admissionregistration.k8s.io -A The output for the command above should be similar to: yes Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc.). APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes, you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: bash Copy $ oc login -u system:admin Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Set up the injection of metadata Injection of metadata is included when you install our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled: webhook: enabled: true Copy By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Custom configuration Limit namespaces subject to injection You can limit the injection of metadata only to specific namespaces by using labels. 
To enable this feature, add the following to your values-newrelic.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: bash Copy $ kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Use cert-manager to generate certs By default, our chart uses kube-webhook-certgen to automatically generate the required certificates for the webhook to run. However, if you have cert-manager installed, you can configure our chart to use cert-manager instead, which can significantly simplify the deployment process: nri-metadata-injection: certManager: enabled: true Copy Manage custom certificates Tip Manually managing webhook certificates is recommended for advanced users only. The New Relic support team might not be able to help troubleshoot this configuration. To use custom certificates, you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation of certificates, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl pkey -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out CA_BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help. Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata To validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy nginx pod by running: bash Copy $ kubectl run test-nginx --image nginx -n newrelic Check if New Relic environment variables were injected: bash Copy $ kubectl exec -n newrelic test-nginx -- env | grep NEW_RELIC_METADATA_KUBERNETES The expected output would be something like the following: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=cluster-name NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=newrelic NEW_RELIC_METADATA_KUBERNETES_POD_NAME=test-nginx NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=nginx Copy Disable the injection of metadata To disable/uninstall the injection of metadata, change your values-newrelic.yaml file as follows: webhook: enabled: false Copy And re-run the installation command. 
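For example, a hedged sketch of that re-run using --set instead of the values file (the release name and namespace mirror the installation examples in this document): bash Copy $ helm upgrade --install newrelic newrelic/nri-bundle --namespace newrelic --reuse-values --set webhook.enabled=false 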
Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to run the job again using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new versions of the manifest and of the nri-bundle are already updated with the correct version of the image. The CA that is used to patch the `mutatingwebhookconfiguration` resource is not the one used to sign the certificates Problem In OpenShift version 4.x, the CA that is used to patch the mutatingwebhookconfiguration resource is not the one used to sign the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error messages: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. No Kubernetes metadata in the transactions' attributes Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step. If they are not present, get the name of the metadata injection pod by running: bash Copy $ kubectl get pods | grep newrelic-metadata-injection-deployment $ kubectl logs -f pod/my-pod In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. 
For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries on the logs, it means that the apiserver is not being able to communicate with the webhook service, this could be due to networking rules or security groups rejecting the communication. To check if the apiserver is not being able to communicate with the webhook you should inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by the executing the following command in a terminal window and keep it running. bash Copy $ kubectl proxy --port=8001 Create a new pod in your cluster, this will make the apiserver try to communicate with the webhook. The following command will create a busybox. bash Copy $ kubectl create -f https://git.io/vPieo Retrieve the apiserver logs. bash Copy $ curl localhost:8001/logs/kube-apiserver.log > apiserver.log Delete the busybox container. bash Copy $ kubectl delete -f https://git.io/vPieo Inspect the logs for errors. bash Copy $ grep -E 'failed calling webhook' apiserver.log Tip One of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. 
If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: bash Copy $ kubectl get job newrelic-metadata-setup If the job is not completed, investigate the logs of the setup job: bash Copy $ kubectl logs job/newrelic-metadata-setup Ensure the CertificateSigningRequest is approved and issued by running: bash Copy $ kubectl get csr newrelic-metadata-injection-svc.default Ensure the TLS secret is present by running: bash Copy $ kubectl get secret newrelic-metadata-injection-secret Ensure the CA bundle is present in the mutating webhook configuration: bash Copy $ kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: bash Copy $ kubectl describe service/newrelic-metadata-injection-svc $ kubectl describe deployment/newrelic-metadata-injection-deployment", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 219.85573, + "_score": 209.77762, "_version": null, "_explanation": null, "sort": null, @@ -60288,9 +60242,9 @@ "title": "Link your applications to Kubernetes", "sections": "Link your applications to Kubernetes", "tags": "Link apps and services", - "body": " the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips" + "body": " metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x, The new version 1.3.2 fixes this issue, therefore it is enough to run again" }, - "id": "617daead28ccbc662b7ffe23" + "id": "61fd3c9d196a675ff3e80980" } ], "/mongodb/6183e12f-a398-48e6-8431-a48db03e4443": [ @@ -60325,7 +60279,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59566, + "_score": 102.699814, "_version": null, "_explanation": null, "sort": null, @@ -60367,7 +60321,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75413, + "_score": 101.91943, "_version": null, "_explanation": null, "sort": null, @@ -60409,7 +60363,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7467, + "_score": 101.913666, "_version": null, "_explanation": null, "sort": null, @@ -60451,7 +60405,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93249, + "_score": 98.52904, "_version": null, "_explanation": null, "sort": null, @@ -60494,7 +60448,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.185425, + "_score": 95.344635, "_version": null, "_explanation": null, "sort": null, @@ -60537,7 +60491,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59566, + "_score": 102.699814, "_version": null, 
"_explanation": null, "sort": null, @@ -60579,7 +60533,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75413, + "_score": 101.91943, "_version": null, "_explanation": null, "sort": null, @@ -60621,7 +60575,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7467, + "_score": 101.913666, "_version": null, "_explanation": null, "sort": null, @@ -60663,7 +60617,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93249, + "_score": 98.52904, "_version": null, "_explanation": null, "sort": null, @@ -60706,7 +60660,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.185425, + "_score": 95.344635, "_version": null, "_explanation": null, "sort": null, @@ -60743,7 +60697,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 338.17633, + "_score": 334.98285, "_version": null, "_explanation": null, "sort": null, @@ -60781,7 +60735,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 247.02386, + "_score": 243.99362, "_version": null, "_explanation": null, "sort": null, @@ -60817,7 +60771,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 211.91194, + "_score": 209.957, "_version": null, "_explanation": null, "sort": null, @@ -60850,7 +60804,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 208.61685, + "_score": 206.11299, "_version": null, "_explanation": null, "sort": null, @@ -60891,7 +60845,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 200.91837, + "_score": 199.03639, "_version": null, "_explanation": null, "sort": null, @@ -60946,7 +60900,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 320.31, + "_score": 297.41306, "_version": null, "_explanation": null, "sort": null, @@ -61000,7 +60954,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 273.3386, + "_score": 267.71362, "_version": null, "_explanation": null, "sort": null, @@ -61014,145 +60968,144 @@ }, { "sections": [ - "Link your applications to Kubernetes", + "New Relic Metrics Adapter", + "BETA FEATURE", + "Requirements", + "Installation", "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" + "Configuration", + "How it works", + "Caution", + "Troubleshooting", + "Get verbose logs", + "Get raw metrics", + "Metrics not working" ], - "title": "Link your applications to Kubernetes", + "title": "New Relic Metrics Adapter", "type": "docs", "tags": [ "Link apps and services", "Kubernetes integration", "Integrations" ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", + "external_id": "51fdc0c8df2fdc91fcc51556e323c62e7c12d48a", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": 
"2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/newrelic-metrics-adapter/", + "published_at": "2022-02-15T19:18:45Z", + "updated_at": "2022-02-04T12:17:01Z", "document_type": "page", "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. 
admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled. Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates, you need to disable the automatic installation of certificates when you are installing using Helm. 
To disable the installation of certificates, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl pkey -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out CA_BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help. Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata To validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to run the job again using an updated version of the image. 
Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used in order to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step. If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response 
created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries on the logs, it means that the apiserver is not being able to communicate with the webhook service, this could be due to networking rules or security groups rejecting the communication. To check if the apiserver is not being able to communicate with the webhook you should inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by the executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster, this will make the apiserver try to communicate with the webhook. The following command will create a busybox. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors. grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. 
Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", + "body": "BETA FEATURE This feature is still in development, but we encourage you to try it out! You can use metrics from your New Relic account to autoscale applications and services in your Kubernetes cluster by deploying the New Relic Metrics Adapter. This adapter fetches the metric values from New Relic and makes them available for the Horizontal Pod Autoscalers. The newrelic-k8s-metrics-adapter implements the external.metrics.k8s.io API to support the use of external metrics based on New Relic NRQL query results. Once deployed, the value for each configured metric is fetched using the NerdGraph API based on the configured NRQL query. The metrics adapter exposes the metrics over a secured endpoint with TLS. New Relic metrics adapter in a cluster. Requirements Kubernetes 1.16 or higher. The New Relic Kubernetes integration. New Relic's user API key. No other External Metrics Adapter installed in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. Upgrade the installation to include the New Relic Metrics Adapter with the following command: helm upgrade --install newrelic newrelic/nri-bundle \\ --namespace newrelic --create-namespace --reuse-values \\ --set metrics-adapter.enabled=true \\ --set newrelic-k8s-metrics-adapter.personalAPIKey=YOUR_NEW_RELIC_PERSONAL_API_KEY \\ --set newrelic-k8s-metrics-adapter.config.accountID=YOUR_NEW_RELIC_ACCOUNT_ID \\ --set newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query=NRQL query Copy Please note and adjust the following flags: metrics-adapter.enabled: Must be set to true so the metrics adapter chart is installed. newrelic-k8s-metrics-adapter.personalAPIKey: Must be set to a valid New Relic Personal API key. newrelic-k8s-metrics-adapter.config.accountID: Must be set to a valid New Relic account ID that metrics will be fetched from. newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query: Adds a new external metric where: external_metric_name: The metric name. query: The base NRQL query that is used to get the value for the metric. Tip Alternatively, you can use a values.yaml file that can be passed to the helm command with the --values flag. Values files can contain all parameters needed to configure the metrics explained in the configuration section. Configuration You can configure multiple metrics in the metrics adapter and change some parameters to modify the behaviour of the metrics cache and filtering. 
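As an illustration, here is a sketch of a values file fragment defining two external metrics side by side (the second metric name and its NRQL query are assumptions for the example, not chart defaults): newrelic-k8s-metrics-adapter: config: externalMetrics: nginx_average_requests: query: \"FROM Metric SELECT average(nginx.server.net.requestsPerSecond) SINCE 2 MINUTES AGO\" container_avg_cores: query: \"FROM K8sContainerSample SELECT average(cpuUsedCores) SINCE 2 MINUTES AGO\" Copy 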
To see the full list and descriptions of all parameters that can be modified, refer to the chart README.md and values.yaml files. How it works The following example is a Helm values file that enables the metrics adapter on the nri-bundle chart installation, and configures the nginx_average_requests metric: metrics-adapter: enabled: true newrelic-k8s-metrics-adapter: personalAPIKey: config: accountID: externalMetrics: nginx_average_requests: query: \"FROM Metric SELECT average(nginx.server.net.requestsPerSecond) SINCE 2 MINUTES AGO\" Copy Caution The default time span for metrics is 1h. Therefore, you should define queries with the SINCE clause to adjust the time span according to your environment and needs. Now assume there is an HPA consuming the external metric as follows: kind: HorizontalPodAutoscaler apiVersion: autoscaling/v2beta2 metadata: name: nginx-scaler spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: nginx minReplicas: 1 maxReplicas: 10 metrics: - type: External external: metric: name: nginx_average_requests selector: matchLabels: k8s.namespaceName: nginx target: type: Value value: 10000 Copy Based on the HPA definition, the controller manager fetches the metrics from the external metrics API, which is served by the New Relic metrics adapter. The New Relic metrics adapter receives the query including the nginx_average_requests metric name and all the selectors, and searches for a matching metric name in the internal memory based on the configured metrics. Then, it adds the selectors to the query to form a final query that is executed using NerdGraph to fetch the value from New Relic. The above example will generate a query like the following: FROM Metric SELECT average(nginx.server.net.requestsPerSecond) WHERE clusterName= AND `k8s.namespaceName`='nginx' SINCE 2 MINUTES AGO Copy Notice that a clusterName filter has been automatically added to the query to exclude metrics from other clusters in the same account. You can remove it by using the removeClusterFilter configuration parameter. Also, the value is cached for a period of time defined by the cacheTTLSeconds configuration parameter, whose default is 30 seconds. Troubleshooting Get verbose logs Most common errors are displayed in the standard (non-verbose) logs. If you're doing a more in-depth investigation on your own or with New Relic Support, you can enable verbose mode. To get verbose logging details for an integration using Helm: Enable verbose logging: bash Copy $ helm upgrade -n newrelic --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=true newrelic/nri-bundle Leave verbose mode on for a few minutes, or until enough activity has occurred. When you have the information you need, disable verbose logging: bash Copy $ helm upgrade --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=false newrelic/nri-bundle Caution Verbose mode significantly increases the amount of information sent to log files. Enable this mode temporarily, only for troubleshooting purposes, and reset the log level when finished. Get raw metrics Sometimes it's useful to get the list of available metrics and also to get the current value of a specific metric. 
To get the list of metrics available, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/\" To get the value for a specific metric with a selector, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/namespaces/*/__METRIC_NAME__?labelSelector=_SELECTOR_KEY_=_SELECTOR_VALUE_\" Tip You must replace __METRIC_NAME__, _SELECTOR_KEY_, and _SELECTOR_VALUE_ with your values. Metrics not working There are some common errors that can cause a metric to fail to retrieve its value. These errors are shown in the status of the metrics when you describe the HPA, or are printed when you get the raw metrics directly. executing query: NRQL Syntax Error: Error at line...: The query that is being run has syntax errors. The same error message gives you the executed query and position of the error. You can try this query inside the New Relic query builder and correct the configuration from the adapter. extracting return value: expected first value to be of type \"float64\", got %!q(): The query doesn't return any value. The same error message gives you the executed query so you can try this query inside the New Relic query builder and correct the configuration from the adapter or the match selectors in the HPA.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 268.2477, + "_score": 222.93948, "_version": null, "_explanation": null, "sort": null, "highlight": { "tags": "Kubernetes integration", "body": " in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. 
Upgrade" }, - "id": "617daead28ccbc662b7ffe23" + "id": "61fd193d196a672daae826d6" }, { "sections": [ - "Kubernetes integration: what's changed in v3", - "v3 BETA", - "Integration version and Chart version", - "Overview", - "Architectural changes", - "Kube-state-metrics component", - "Kubelet component", - "Control plane component", - "Helm Charts", - "Migration Guide", - "KSM configuration", - "Tip", - "Control plane configuration", - "Agent configuration", - "Integrations configuration", - "Miscellaneous chart values", - "Upgrade from v2", - "Important" + "Kubernetes integration: Predefined alert policy", + "Predefined alert conditions", + "Container CPU usage % is too high", + "Container memory usage % is too high", + "Pod was unable to be scheduled", + "Pod is not ready", + "Container is running out of space", + "ReplicaSet doesn't have desired amount of pods", + "etcd open file descriptors", + "Create new alert conditions" ], - "title": "Kubernetes integration: what's changed in v3", + "title": "Kubernetes integration: Predefined alert policy", "type": "docs", "tags": [ - "Changelog", + "Installation", "Kubernetes integration", "Integrations" ], - "external_id": "a78ca20934f98fd2b43c7a9fbc2453c802c24ab8", + "external_id": "7c92831c394c4c087bad8b481250e55557e4b794", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/changes-since-v3/", - "published_at": "2022-02-15T19:13:49Z", - "updated_at": "2022-02-13T15:01:38Z", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/kubernetes-integration-predefined-alert-policy/", + "published_at": "2022-02-15T19:17:51Z", + "updated_at": "2022-02-04T14:47:09Z", "document_type": "page", "popularity": 1, - "body": "v3 BETA Kubernetes integration v3 is currently in a late stage beta, and we expect to make it generally available during the second quarter of 2022. We encourage you to give it a try and let us know what you think! Integration version and Chart version The Kubernetes integration v3 (appVersion) is included on the nri-bundle chart version 4. Overview v3 BETA Data reported by the Kubernetes Integration version 3 has not changed with respect to version 2. For this major release, we focused in configurability, stability, and user experience. From version 3 onwards, New Relic's Kubernetes solution features a new architecture which aims to be more modular and configurable, giving you more power to choose how the solution is deployed and making it compatible with more environments. Architectural changes In this new version, the main component of the integration, the newrelic-infrastructure DaemonSet, is divided in three different components: nrk8s-ksm, nrk8s-kubelet, and nrk8s-controlplane, with the first being a deployment and the next two being DaemonSets. This makes it easier to make decisions at scheduling and deployment time, rather than runtime. Moreover, we also changed the lifecycle of the scraping process. We went from a one-shot, short-lived process, to a long-lived one, allowing it to leverage higher-level Kubernetes APIs like the Kubernetes informers, that provide built-in caching and watching of cluster objects. For this reason, each of the components has two containers: A container for the integration, responsible for collecting metrics. A container with the New Relic Infrastructure Agent, which is used to send the metrics to the New Relic Platform. 
Kube-state-metrics component We build our cluster state metrics on top of the OSS project kube-state-metrics, which is housed under the Kubernetes organization itself. Previously, as our solution consisted of just one DaemonSet, an election process decided which pod would be in charge of scraping the metrics. This process was based merely on locality. The pod in charge would be the one that shares a node with the KSM deployment. As the KSM output contains data for the whole cluster, parsing this output requires a substantial amount of resources. While this is a cost that big cluster operators can absorb, the fact that one arbitrary instance of the DaemonSet needs this large amount of resources forces cluster operators to allow such consumption for the whole DaemonSet, when only one pod actually needs it. Another problem with KSM scraping was figuring out in which node the KSM pod lived. To do this, we needed to contact the API Server and filter pods by some labels, but given the short-lived nature of the integration, caches and watchers were not being used effectively by it. As a result, on large clusters, all instances of the DaemonSet flooded the control plane with non-namespaced pod list requests in an attempt to figure out whether the KSM pod was living next to them. We decided to tackle this problem by making two big changes to how KSM is scraped: Split the responsibility of scraping KSM out of the DaemonSet pods to a different, single instance Deployment. Refactor the code and make it long-running, so we can leverage Kubernetes informers which provide built-in caching and watching mechanisms. Thus, a specific Deployment nrk8s-ksm now takes care of finding KSM and scraping it. With this pod now being long-lived and single, it can safely use an endpoints informer to locate the IP of the KSM pod and scrape it. The informer automatically caches the list of endpoints in the cluster locally and watches for new ones, avoiding storming the API Server with requests to figure out where the pod was located. While a sharded KSM setup is not supported yet, this new code was built with this future improvement in mind. Kubelet component The Kubelet is the “Kubernetes agent”, a service that runs on every Kubernetes node and is responsible for creating the containers as instructed by the control plane. Since it's the Kubelet that partners closely with the Container Runtime, it's the main source of infrastructure metrics for our integration, such as use of CPU, memory, disk, network, etc. Although not thoroughly documented, the Kubelet API is the de-facto standard source for most Kubernetes metrics. Scraping the Kubelet is typically a low-resource operation. Given this, and our intent to minimize internode traffic whenever possible, nrk8s-kubelet is run as a DaemonSet where each instance gathers metrics from the Kubelet running in the same node. nrk8s-kubelet no longer requires hostNetwork to run properly, and instead it connects to the Kubelet using the Node IP. If this process fails, nrk8s-kubelet will fall back to reach the node through the API Server proxy. This fallback mechanism is not new, but we do encourage you to mind this if you have very large clusters, as proxying many kubelets might increase the load in the API server. 
You can check if the API Server is being used as a proxy by looking for a message like this in the logs: Trying to connect to kubelet through API proxy Copy Control plane component Enabling the integration to successfully find and connect to CP components was probably one of the hardest parts of this effort. The main reason for this is the number of ways in which CP components can be configured: inside or outside the cluster, with one or many replicas, with or without dedicated nodes, etc. Moreover, different CP components might be configured differently. We built the current approach with the following scenarios in mind: CP monitoring should work out of the box for those environments in which the CP is readily reachable, e.g. Kubeadm or even Minikube. For setups where the CP cannot be autodiscovered, for example if it lives outside the cluster, we should provide a way for the user to specify their own endpoints. Failure to autodiscover shouldn't cause the deployment to fail, but failure to hit a manually defined endpoint should. As major Kubernetes distributions such as Kubeadm deploy CP components configured to listen only in localhost on the host's network namespace, we chose to deploy nrk8s-controlplane as a DaemonSet with hostNetwork: true. We structured the configuration to support autodiscover and static endpoints. To be compatible with a wide range of distributions out of the box, we provide a broad set of known defaults as configuration entries. Doing this in the configuration instead of the code allows you to tweak autodiscovery to your needs. Another improvement was adding the possibility of having multiple endpoints per selector and adding a probe mechanism which automatically detects the correct one. This allows you to try different configurations such as ports or protocols by using the same selector. Scraping configuration for the etcd CP component looks like the following, where the same structure and features apply to all components: config: etcd: enabled: true autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy If staticEndpoint is set, the component will try to scrape it. If it can't hit the endpoint, the integration will fail, so there are no silent errors when manual endpoints are configured. If staticEndpoint is not set, the component will iterate over the autodiscover entries looking for the first pod that matches the selector in the specified namespace, and optionally is running in the same node of the DaemonSet (if matchNode is set to true). After a pod is discovered, the component probes the listed endpoints in order by issuing an HTTP HEAD request, and scrapes the first successfully probed one using the selected authorization type. While above we show a config excerpt for the etcd component, the scraping logic is the same for other components. For more detailed instructions on how to configure control plane monitoring, please check the control plane monitoring page. Helm Charts Helm is the primary means we offer to deploy our solution into your clusters. Chart complexity has also increased significantly compared to the previous version, where it only had to manage one DaemonSet. Now, it has to manage one deployment and two DaemonSets where each has slightly different configurations.
This gives you more flexibility to adapt the solution to your needs, without the need to apply manual patches on top of the chart and the generated manifests. Some of the new features that our new Helm chart exposes are: Full control of the securityContext for all pods Full control of pod labels and annotations for all pods Ability to add extra environment variables, volumes, and volumeMounts Full control over the integration configuration, including which endpoints are reached, autodiscovery behavior, and scraping intervals Better alignment with Helm idioms and standards You can check full details on all the switches that can be flipped in the Chart's README.md. Migration Guide In order to make migration from earlier versions as easy as possible, we developed a compatibility layer that translates most of the options that could be specified in the old newrelic-infrastructure chart to their new counterparts. This compatibility layer is temporary and will be removed in the future, so we encourage you to read this guide carefully and migrate the configuration with human supervision. KSM configuration Tip KSM monitoring works out of the box for most configurations; most users will not need to change this config. disableKubeStateMetrics has been replaced by ksm.enabled. The default is still the same (KSM scraping enabled). kubeStateMetricsScheme, kubeStateMetricsPort, kubeStateMetricsUrl, kubeStateMetricsPodLabel, and kubeStateMetricsNamespace have been replaced by the more comprehensive and flexible ksm.config. The ksm.config object has the following structure: ksm: config: # When autodiscovering KSM, force the following scheme. By default, `http` is used. scheme: \"http\" # Label selector to find kube-state-metrics endpoints. Defaults to `app.kubernetes.io/name=kube-state-metrics`. selector: \"app.kubernetes.io/name=kube-state-metrics\" # Restrict KSM discovery to this particular namespace. Defaults to all namespaces. namespace: \"\" # When autodiscovering, only consider endpoints that use this port. By default, all ports from the discovered `endpoint` are probed. #port: 8080 # Override autodiscovery mechanism completely and specify the KSM url directly instead #staticUrl: \"http://test.io:8080/metrics\" Copy Control plane configuration Control plane configuration has changed substantially. If you previously had control plane monitoring enabled, we encourage you to take a look at the dedicated Configure control plane monitoring page. The following options have been replaced by more comprehensive configuration, covered in the section linked above: apiServerSecurePort etcdTlsSecretName etcdTlsSecretNamespace controllerManagerEndpointUrl, etcdEndpointUrl, apiServerEndpointUrl, and schedulerEndpointUrl Agent configuration The agent config file, previously specified in config, has been moved to common.agentConfig. The format of the file has not changed, and the full range of options that can be configured can be found here. The following agent options were previously \"aliased\" in the root of the values.yml file, and are no longer available: logFile has been replaced by common.agentConfig.log_file. eventQueueDepth has been replaced by common.agentConfig.event_queue_depth. customAttributes has changed in format to a YAML object. The previous format, a manually json-encoded string e.g. {\"team\": \"devops\"}, is still accepted although discouraged. Previously, customAttributes had a default clusterName entry which might have unwanted consequences if removed.
This is no longer the case; users may now safely override customAttributes in its entirety. discoveryCacheTTL has been completely removed, as the discovery is now performed using Kubernetes informers, which have a built-in cache. Integrations configuration Integrations were previously configured under integrations_config, using an array format: integrations_config: - name: nri-redis.yaml data: discovery: # ... integrations: # ... Copy The mechanism remains the same, but we have changed the format to be more user-friendly: integrations: nri-redis-sampleapp: discovery: # ... integrations: # ... Copy Moreover, the --port and --tls flags are now mandatory on the discovery command. In the past, the following would work: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes Copy From v3 onwards, you must specify --port and --tls: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes --tls --port 10250 Copy This change is required because in v2 and below, the nrk8s-kubelet component (or its equivalent) ran with hostNetwork: true, so nri-discovery-kubernetes could connect to the kubelet using localhost and plain http. For security reasons, this is no longer the case, hence the need to specify both flags from now on. For more details on how to configure on-host integrations in Kubernetes, please check the Monitor services in Kubernetes page. Miscellaneous chart values While not related to the integration configuration, the following miscellaneous options for the Helm chart have also changed: runAsUser has been replaced by securityContext, which is templated directly into the pods and is more configurable. resources has been removed, as we now deploy three different workloads. Resources for each one can be configured individually under: ksm.resources kubelet.resources controlPlane.resources Similarly, tolerations has been split into three and the previous one is no longer valid: ksm.tolerations kubelet.tolerations controlPlane.tolerations All three default to tolerate any value for NoSchedule and NoExecute. image and all its subkeys have been replaced by individual sections for each of the three images that are now deployed: images.forwarder.* to configure the infrastructure-agent forwarder. images.agent.* to configure the image bundling the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values-newrelic.yaml file with your desired License Key and configuration.
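For example, if your master nodes carry a custom taint, the new per-workload keys let you target just the control plane scraper; an illustrative sketch only, where the taint key is invented:

    # Tolerations are now configured per workload instead of globally.
    controlPlane:
      tolerations:
        - key: "example.com/dedicated-master"   # hypothetical taint key
          operator: "Exists"
          effect: "NoSchedule"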
If you had previously installed our chart from the CLI directly, for example using a command like the following: bash Copy $ helm install newrelic/nri-bundle \\ > --set global.licenseKey= \\ > --set global.cluster= \\ > --set infrastructure.enabled=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set ksm.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true You can take the provided --set arguments and put them in a yaml file like the following: # values-newrelic.yaml global: licenseKey: cluster: infrastructure: enabled: true prometheus: enabled: true webhook: enabled: true ksm: enabled: true kubeEvents: enabled: true logging: enabled: true Copy After doing this, and adapting any other setting you might have changed according to the section above, you can upgrade by running the following command: bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel The --devel flag will instruct helm to download the v3 version of the integration (version 4.x of the nri-bundle chart). Important The --reuse-values flag is not supported for upgrading from v2 to v3.", + "body": "When deploying the New Relic Kubernetes integration for the first time in an account, we deploy a default set of alert conditions to your account. The predefined alert policy, named Kubernetes default alert policy, doesn't have a notification channel by default to avoid unwanted notifications. The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT value (cpuUsedCores/cpuLimitCores)*100 Warning threshold > 90% for at least 5 minutes Critical threshold > 95% for at least 5 mins Container memory usage % is too high Setting Value Event type K8sContainerSample SELECT value memoryWorkingSetUtilization Warning threshold > 85% for at least 5 minutes Critical threshold > 95% for at least 5 mins Pod was unable to be scheduled Setting Value Event type K8sPodSample SELECT value isScheduled Warning threshold Critical threshold isScheduled = 0 for at least 7 minutes Pod is not ready Setting Value Event type K8sPodSample SELECT value isReady Warning threshold Critical threshold isReady = 0 for at least 10 minutes Container is running out of space Setting Value Event type K8sContainerSample SELECT value fsUsedPercent Warning threshold > 75% for at least 5 minutes Critical threshold > 90% for at least 5 minutes ReplicaSet doesn't have desired amount of pods Setting Value Event type K8sReplicaSetSample SELECT value podsDesired - podsReady Warning threshold Critical threshold 0 for at least 5 minutes etcd open file descriptors Setting Value Event type K8sEtcdSample SELECT value (processOpenFds/processMaxFds)*100 Warning threshold > 75% for at least 3 minutes Critical threshold > 90% for at least 5 minutes Create new alert conditions To create new alert conditions based on Kubernetes metric data, see Understand and use data.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 229.04001, + "_score": 218.63768, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Kubernetes integration: what's changed in v3", - "sections": "Kubernetes integration: what's changed in v3", + "title": "Kubernetes 
integration: Predefined alert policy", + "sections": "Kubernetes integration: Predefined alert policy", "tags": "Kubernetes integration", - "body": " the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values" + "body": ". The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT" }, - "id": "61fd3c9d28ccbc72eec0dcda" + "id": "61fd3c6de7b9d279f45e6625" }, { "sections": [ - "New Relic Metrics Adapter", - "BETA FEATURE", - "Requirements", - "Installation", + "Configure control plane monitoring", "Tip", + "Features", + "Compatibility and requirements", + "Control plane component", + "Autodiscovery and default configuration", + "hostNetwork and privileged", + "Custom autodiscovery", + "mTLS", + "Static endpoints", + "Limitations", + "Important", + "Control plane monitoring for managed and cloud environments", + "Monitoring control plane with integration version 2", + "Control plane monitoring on Integration version 2", + "Discovery of master nodes and control plane components", "Configuration", - "How it works", - "Caution", - "Troubleshooting", - "Get verbose logs", - "Get raw metrics", - "Metrics not working" + "etcd", + "API server", + "OpenShift configuration", + "OpenShift configuration on Integration version 2", + "Set up mTLS for etcd in OpenShift", + "See your data" ], - "title": "New Relic Metrics Adapter", + "title": "Configure control plane monitoring", "type": "docs", "tags": [ - "Link apps and services", + "Installation", "Kubernetes integration", "Integrations" ], - "external_id": "51fdc0c8df2fdc91fcc51556e323c62e7c12d48a", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/newrelic-metrics-adapter/", - "published_at": "2022-02-15T19:18:45Z", - "updated_at": "2022-02-04T12:17:01Z", + "external_id": "33b7b8ae3dab9a2ca553dcf8ea0c97499478a85a", + "image": "https://docs.newrelic.com/static/209f301630c770f87ea8cbb1cace8e6e/8c557/new-relic-one-k8s-cluster-explorer-control-plane-parameters.png", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/configure-control-plane-monitoring/", + "published_at": "2022-02-15T19:15:25Z", + "updated_at": "2022-02-04T12:15:37Z", "document_type": "page", "popularity": 1, - "body": "BETA FEATURE This feature is still in development, but we encourage you to try it out! You can use metrics from your New Relic account to autoscale applications and services in your Kubernetes cluster by deploying the New Relic Metrics Adapter. This adapter fetches the metric values from New Relic and makes them available for the Horizontal Pod Autoscalers. The newrelic-k8s-metrics-adapter implements the external.metrics.k8s.io API to support the use of external metrics based New Relic NRQL queries results. Once deployed, the value for each configured metric is fetched using the NerdGraph API based on the configured NRQL query. The metrics adapter exposes the metrics over a secured endpoint with TLS. New Relic metrics adapter in a cluster. Requirements Kubernetes 1.16 or higher. 
The New Relic Kubernetes integration. New Relic's user API key. No other External Metrics Adapter installed in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. Upgrade the installation to include the New Relic Metrics Adapter with the following command: helm upgrade --install newrelic newrelic/nri-bundle \\ --namespace newrelic --create-namespace --reuse-values \\ --set metrics-adapter.enabled=true \\ --set newrelic-k8s-metrics-adapter.personalAPIKey=YOUR_NEW_RELIC_PERSONAL_API_KEY \\ --set newrelic-k8s-metrics-adapter.config.accountID=YOUR_NEW_RELIC_ACCOUNT_ID \\ --set newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query=NRQL query Copy Please note and adjust the following flags: metrics-adapter.enabled: Must be set to true so the metrics adapter chart is installed. newrelic-k8s-metrics-adapter.personalAPIKey: Must be set to a valid New Relic Personal API key. newrelic-k8s-metrics-adapter.config.accountID: Must be set to the New Relic account ID from which metrics will be fetched. newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query: Adds a new external metric where: external_metric_name: The metric name. query: The base NRQL query that is used to get the value for the metric. Tip Alternatively, you can use a values.yaml file that can be passed to the helm command with the --values flag. Values files can contain all parameters needed to configure the metrics explained in the configuration section. Configuration You can configure multiple metrics in the metrics adapter and change some parameters to modify the behaviour of the metrics cache and filtering. To see the full list and descriptions of all parameters that can be modified, refer to the chart README.md and values.yaml files. How it works The following example is a Helm values file that enables the metrics adapter in the nri-bundle chart installation, and configures the nginx_average_requests metric: metrics-adapter: enabled: true newrelic-k8s-metrics-adapter: personalAPIKey: config: accountID: externalMetrics: nginx_average_requests: query: \"FROM Metric SELECT average(nginx.server.net.requestsPerSecond) SINCE 2 MINUTES AGO\" Copy Caution The default time span for metrics is 1h. Therefore, you should define queries with the SINCE clause to adjust the time span according to your environment and needs. There is an HPA consuming the external metric as follows: kind: HorizontalPodAutoscaler apiVersion: autoscaling/v2beta2 metadata: name: nginx-scaler spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: nginx minReplicas: 1 maxReplicas: 10 metrics: - type: External external: metric: name: nginx_average_requests selector: matchLabels: k8s.namespaceName: nginx target: type: Value value: 10000 Copy Based on the HPA definition, the controller manager fetches the metrics from the external metrics API, which are served by the New Relic metrics adapter. The New Relic metrics adapter receives the query including the nginx_average_requests metric name and all the selectors, and searches for a matching metric name in the internal memory based on the configured metrics. Then, it adds the selectors to the query to form a final query that is executed using NerdGraph to fetch the value from New Relic.
The above example will generate a query like the following: FROM Metric SELECT average(nginx.server.net.requestsPerSecond) WHERE clusterName= AND `k8s.namespaceName`='nginx' SINCE 2 MINUTES AGO Copy Notice that a clusterName filter has been automatically added to the query to exclude metrics from other clusters in the same account. You can remove it by using the removeClusterFilter configuration parameter. Also, the value is cached for a period of time defined by the cacheTTLSeconds configuration parameter, whose default is 30 seconds. Troubleshooting Get verbose logs Most common errors are displayed in the standard (non-verbose) logs. If you're doing a more in-depth investigation on your own or with New Relic Support, you can enable verbose mode. To get verbose logging details for an integration using Helm: Enable verbose logging: bash Copy $ helm upgrade -n newrelic --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=true newrelic/nri-bundle Leave verbose mode on for a few minutes, or until enough activity has occurred. When you have the information you need, disable verbose logging: bash Copy $ helm upgrade --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=false newrelic/nri-bundle Caution Verbose mode significantly increases the amount of information sent to log files. Enable this mode temporarily, only for troubleshooting purposes, and reset the log level when finished. Get raw metrics Sometimes it's useful to get the list of available metrics and also to get the current value of a specific metric. To get the list of metrics available, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/\" To get the value for a specific metric with a selector, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/namespaces/*/__METRIC_NAME__?labelSelector=_SELECTOR_KEY_=_SELECTOR_VALUE_\" Tip You must replace METRIC_NAME, SELECTOR_KEY, and SELECTOR_VALUE with your values. Metrics not working There are some common errors that could cause a metric to fail to retrieve its value. These errors are shown in the status of the metrics when you describe the HPA, or are printed when you get the raw metrics directly. executing query: NRQL Syntax Error: Error at line...: The query that is being run has syntax errors. The same error message gives you the executed query and position of the error. You can try this query inside the New Relic query builder and correct the configuration from the adapter. extracting return value: expected first value to be of type \"float64\", got %!q(): The query doesn't return any value. The same error message gives you the executed query so you can try this query inside the New Relic query builder and correct the configuration from the adapter or the match selectors in the HPA.", + "body": "New Relic provides control plane support for your Kubernetes integration, allowing you to monitor and collect metrics from your cluster's control plane components. That data can then be found in New Relic and used to create queries and charts. Tip Unless otherwise specified, this page refers to the Kubernetes integration v3. Details on how to configure control plane monitoring for v2 can be found in a specific section below. Features We monitor and collect metrics from the following control plane components: etcd: leader information, resident memory size, number of OS threads, consensus proposals data, etc. For a list of supported metrics, see etcd data.
API server: rate of apiserver requests, breakdown of apiserver requests by HTTP method and response code, etc. For the complete list of supported metrics, see API server data. Scheduler: requested CPU/memory vs available on the node, tolerations to taints, any set affinity or anti-affinity, etc. For the complete list of supported metrics, see Scheduler data. Controller manager: resident memory size, number of OS threads created, goroutines currently existing, etc. For the complete list of supported metrics, see Controller manager data. Compatibility and requirements Control plane monitoring support is limited for managed clusters. This is because most cloud providers do not expose the metrics endpoints for the control plane components, so New Relic cannot access them. When deploying the solution in unprivileged mode, control plane setup will require extra steps and some caveats might apply. OpenShift 4.x uses control plane component metric endpoints that are different than the default. Control plane component The task of monitoring the Kubernetes control plane is the responsibility of the nrk8s-controlplane component, which by default is deployed as a DaemonSet. This component is automatically deployed to master nodes, through the use of a default list of nodeSelectorTerms which includes labels commonly used to identify master nodes, such as node-role.kubernetes.io/control-plane or node-role.kubernetes.io/master. Regardless, this selector is exposed in the values.yml file and therefore can be reconfigured to fit other environments. Clusters that do not have any node matching these selectors will not get any pod scheduled, thus not wasting any resources and being functionally equivalent to disabling control plane monitoring altogether by setting controlPlane.enabled to false in the Helm Chart. Each component of the control plane has a dedicated section, which allows you to individually: Enable or disable monitoring of that component Define specific selectors and namespaces for discovering that component Define the endpoints and paths that will be used to fetch metrics for that component Define the authentication mechanisms that need to be used to get metrics for that component Manually specify endpoints that skip autodiscovery completely Autodiscovery and default configuration By default, our Helm Chart ships a configuration that should work out of the box for some control plane components for on-premise distributions that run the control plane inside the cluster, such as Kubeadm or minikube. hostNetwork and privileged Most users and Kubernetes distributions configure the control plane metrics endpoints to listen only on the loopback interface, i.e. localhost. For this reason, the control plane component is deployed with hostNetwork: true by default when privileged is set to true (the default). When the integration is deployed using privileged: false, the hostNetwork setting for the control plane component will also be set to false. We chose to do it this way because otherwise, we would not be honoring the intent users have when they set privileged: false. Unfortunately, deploying without hostNetwork will cause control plane scraping to fail in most environments, which will result in missing metrics or the nrk8s-controlplane pods getting stuck in a CrashLoopBackOff state. This is a limitation of Kubernetes itself, as the control plane cannot be monitored without hostNetwork unless components are manually configured to do so.
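There is a middle ground, covered next, that keeps the integration unprivileged while letting only the control plane pods use hostNetwork; a minimal values sketch using the keys discussed in this section (when configuring through the nri-bundle chart, these would be nested under newrelic-infrastructure:):

    privileged: false                 # run the integration unprivileged...
    controlPlane:
      unprivilegedHostNetwork: true   # ...but keep hostNetwork for control plane scraping only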
It is common, however, to deploy the integration in unprivileged mode (privileged: false) while still considering it acceptable to run the control plane pods with hostNetwork. This can be achieved by setting controlPlane.unprivilegedHostNetwork to true, as in the sketch above: it tells the chart to deploy the control plane component with hostNetwork: true, despite the value of the higher-level privileged flag. If running pods with hostNetwork is not acceptable whatsoever, due to cluster or other policies, control plane monitoring is not possible and should be disabled by setting controlPlane.enabled to false. Custom autodiscovery Selectors used for autodiscovery are completely exposed as configuration entries in the values.yaml file, which means they can be tweaked or replaced to fit almost any environment where the control plane is run as a part of the cluster. An autodiscovery section looks like the following: autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system # Set to true to consider only pods sharing the node with the scraper pod. # This should be set to `true` if Kind is Daemonset, `false` otherwise. matchNode: true # Try to reach etcd using the following endpoints. endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 - selector: \"k8s-app=etcd-manager-main\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer Copy The autodiscover section contains a list of autodiscovery entries. Each entry has: selector: A string-encoded label selector that will be used to look for pods. matchNode: If set to true, it will additionally limit discovery to pods running in the same node as the particular instance of the DaemonSet performing discovery. endpoints: A list of endpoints to try if a pod is found for the specified selector. Additionally, each endpoint has: url: URL to target, including scheme. Can be http or https. insecureSkipVerify: If set to true, certificate will not be checked for https URLs. auth.type: Which mechanism to use to authenticate the request. Currently, the following methods are supported: None: If auth is not specified, the request will not contain any authentication whatsoever. bearer: The same bearer token used to authenticate against the Kubernetes API will be sent to this request. mtls: mTLS will be used to perform the request. mTLS For the mtls type, the following needs to be specified: endpoints: - url: https://localhost:4001 auth: type: mtls mtls: secretName: secret-name secretNamespace: secret-namespace Copy Where secret-name is the name of a Kubernetes TLS Secret, which lives in secret-namespace, and contains the certificate, key, and CA required to connect to that particular endpoint. The integration fetches this secret at runtime rather than mounting it, which means it requires an RBAC role granting it access to it. Our Helm Chart detects auth.mtls entries at render time and will automatically create entries for these particular secrets and namespaces for you, unless rbac.create is set to false. Our integration accepts a secret with the following keys: cert: The PEM-encoded certificate that will be presented to etcd key: The PEM-encoded private key corresponding to the certificate above These certificates should be signed by the same CA etcd is using to operate. How to generate these certificates is out of the scope of this documentation, as it will vary greatly between different Kubernetes distributions.
Please refer to your distribution's documentation to see how to fetch the required etcd peer certificates. In Kubeadm, for example, they can be found in /etc/kubernetes/pki/etcd/peer.{crt,key} in the master node. Once you have located or generated the etcd peer certificates, you should rename the files to match the keys we expect to be present in the secret, and create the secret in the cluster: bash Copy $ mv peer.crt cert $ mv peer.key key $ mv ca.crt cacert $ kubectl -n newrelic create secret generic newrelic-etcd-tls-secret --from-file=./cert --from-file=./key --from-file=./cacert Finally, you can input the secret name (newrelic-etcd-tls-secret) and namespace (newrelic) in the config snippet shown at the beginning of this section. Remember that the Helm Chart will automatically parse this config and create an RBAC role to grant access to this specific secret and namespace for the nrk8s-controlplane component, so there's no manual action needed in that regard. Static endpoints While autodiscovery should cover cases where the control plane lives inside the Kubernetes clusters, some distributions or sophisticated Kubernetes environments run the control plane elsewhere, for a variety of reasons including availability or resource isolation. For these cases, the integration can be configured to scrape an arbitrary, fixed URL regardless of whether a pod with a control plane label is found on the node. This is done by specifying a staticEndpoint entry. For example, one for an external etcd instance would look like this: controlPlane: etcd: staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy staticEndpoint is the same type of entry as endpoints in the autodiscover entry, whose fields are described above. The same authentication mechanisms and schemas are supported here. Please keep in mind that if staticEndpoint is set, the autodiscover section will be ignored in its entirety. Limitations Important If you are using staticEndpoint pointing to an out-of-node (i.e. not localhost) endpoint, you must change controlPlane.kind from DaemonSet to Deployment. When using staticEndpoint, all nrk8s-controlplane pods will attempt to reach and scrape said endpoint. This means that, if nrk8s-controlplane is a DaemonSet (the default), all instances of the DaemonSet will scrape this endpoint. While this is fine if you are pointing them to localhost, if the endpoint is not local to the node you could potentially produce duplicate metrics and increased billable usage. If you are using staticEndpoint and pointing it to a non-local URL, make sure to change controlPlane.kind to Deployment. For the same reason above, it is currently not possible to use autodiscovery for some control plane components, and a static endpoint for others. This is a known limitation we are working to address in future versions of the integration. Lastly, staticEndpoint only allows defining a single endpoint per component. This means that if you have multiple control plane shards in different hosts, it is currently not possible to point to them separately. This is also a known limitation we are working to address in future versions. For the time being, a workaround could be to aggregate metrics for different shards elsewhere, and point the staticEndpoint URL to the aggregated output. Control plane monitoring for managed and cloud environments Some cloud environments, like EKS or GKE, allow retrieving metrics from the Kubernetes API Server.
This can be easily configured as a static endpoint: controlPlane: affinity: nodeAffinity: false # https://github.com/helm/helm/issues/9136 kind: Deployment config: etcd: enabled: false scheduler: enabled: false controllerManager: enabled: false apiServer: staticEndpoint: url: \"https://kubernetes.default:443\" insecureSkipVerify: true auth: type: bearer Copy Please note that this only applies to the API Server and that etcd, the scheduler, and the controller manager remain inaccessible in cloud environments. Monitoring control plane with integration version 2 This section covers how to configure control plane monitoring on versions 2 and earlier of the integration. Please note that these versions had less flexible autodiscovery options, and did not support external endpoints. We strongly recommend you update to version 3 at your earliest convenience. See what's changed in the Kubernetes integration. Control plane monitoring on Integration version 2 Discovery of master nodes and control plane components The Kubernetes integration relies on the kubeadm labeling conventions to discover the master nodes and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master=\"\" or kubernetes.io/role=\"master\". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations and values: Component Label Endpoint API server Kubeadm / Kops / ClusterAPI k8s-app=kube-apiserver tier=control-plane component=kube-apiserver OpenShift app=openshift-kube-apiserver apiserver=true localhost:443/metrics by default (can be configured); if the request fails, it falls back to localhost:8080/metrics etcd Kubeadm / Kops / ClusterAPI k8s-app=etcd-manager-main tier=control-plane component=etcd OpenShift k8s-app=etcd localhost:4001/metrics Scheduler Kubeadm / Kops / ClusterAPI k8s-app=kube-scheduler tier=control-plane component=kube-scheduler OpenShift app=openshift-kube-scheduler scheduler=true localhost:10251/metrics Controller manager Kubeadm / Kops / ClusterAPI k8s-app=kube-controller-manager tier=control-plane component=kube-controller-manager OpenShift app=kube-controller-manager kube-controller-manager=true localhost:10252/metrics When the integration detects that it is running inside a master node, it tries to find which components are running on the node by looking for pods that match the labels listed in the table above. For every running component, the integration makes a request to its metrics endpoint. Configuration Control plane monitoring is automatic for agents running inside master nodes. The only component that requires an extra step to run is etcd, because it uses mutual TLS authentication (mTLS) for client requests. The API Server can also be configured to be queried using the Secure Port. Important Control plane monitoring for OpenShift 4.x requires additional configuration. For more information, see the OpenShift 4.x Configuration section. etcd In order to set mTLS for querying etcd, there are two configuration options that need to be set: Option Value ETCD_TLS_SECRET_NAME Name of a Kubernetes secret that contains the mTLS configuration. The secret should contain the following keys: cert: the certificate that identifies the client making the request. It should be signed by an etcd-trusted CA. key: the private key used to generate the client certificate. cacert: the root CA used to identify the etcd server certificate.
If the ETCD_TLS_SECRET_NAME option is not set, etcd metrics won't be fetched. ETCD_TLS_SECRET_NAMESPACE The namespace where the secret specified in the ETCD_TLS_SECRET_NAME was created. If not set, the default namespace is used. API server By default, the API server metrics are queried using the localhost:8080 unsecured endpoint. If this port is disabled, you can also query these metrics over the secure port. To enable this, set the following configuration option in the Kubernetes integration manifest file: Option Value API_SERVER_ENDPOINT_URL The (secure) URL to query the metrics. The API server uses localhost:443 by default Ensure that the ClusterRole has been updated to the newest version found in the manifest Added in version 1.15.0 Important Note that the port can differ depending on the secure port used by the API server. For example, in Minikube the API server secure port is 8443 and therefore API_SERVER_ENDPOINT_URL should be set to https://localhost:8443 OpenShift configuration Version 3 of the Kubernetes Integration includes default settings that will autodiscover control plane components in OpenShift clusters, so it should work out of the box for all components except etcd. Etcd is not supported out of the box as the metrics endpoint is configured to require mTLS authentication in OpenShift environments. Our integration supports mTLS authentication to fetch etcd metrics in this configuration; however, you will need to create the required mTLS certificate manually. This is necessary to avoid granting wide permissions to our integration without explicit approval from the user. To create an mTLS secret, please follow the steps in this section below, and then configure the integration to use the newly created secret as described in the mtls section. OpenShift configuration on Integration version 2 Important When installing on OpenShift through Helm, specify the configuration to automatically include these endpoints. Setting openshift.enabled=true and openshift.version=\"4.x\" will include the secure endpoints and enable the /var/run/crio.sock runtime. Control plane components on OpenShift 4.x use endpoint URLs that require SSL and service account based authentication. Therefore, the default endpoint URLs cannot be used. To configure control plane monitoring on OpenShift, uncomment the following environment variables in the customized manifest. URL values are pre-configured to the default base URLs for the control plane monitoring metrics endpoints in OpenShift 4.x. - name: \"SCHEDULER_ENDPOINT_URL\" value: \"https://localhost:10259\" - name: \"ETCD_ENDPOINT_URL\" value: \"https://localhost:9979\" - name: \"CONTROLLER_MANAGER_ENDPOINT_URL\" value: \"https://localhost:10257\" - name: \"API_SERVER_ENDPOINT_URL\" value: \"https://localhost:6443\" Copy Important Even though the custom ETCD_ENDPOINT_URL is defined, etcd requires HTTPS and mTLS authentication to be configured. For more on configuring mTLS for etcd in OpenShift, see Set up mTLS for etcd in OpenShift. Set up mTLS for etcd in OpenShift Follow these instructions to set up mutual TLS authentication for etcd in OpenShift 4.x: Export the etcd client certificates from the cluster to an opaque secret. In a default managed OpenShift cluster, the secret is named kube-etcd-client-certs and it is stored in the openshift-monitoring namespace. bash Copy $ kubectl get secret kube-etcd-client-certs -n openshift-monitoring -o yaml > etcd-secret.yaml Open the secret file and change the keys: Rename the certificate authority to cacert.
Rename the client certificate to cert. Rename the client key to key. Optionally, change the secret name and namespace to something meaningful. Remove these unnecessary keys in the metadata section: creationTimestamp resourceVersion selfLink uid Install the manifest with its new name and namespace: bash Copy $ kubectl apply -n newrelic -f etcd-secret.yaml Configure the integration to use the newly created secret as described in the mtls section. See your data If the integration has been set up correctly, the Kubernetes cluster explorer contains all the control plane components and their status in a dedicated section, as shown below. one.newrelic.com > Kubernetes Cluster Explorer: Use the Kubernetes cluster explorer to monitor and collect metrics from your cluster's Control Plane components. You can also check for control plane data with this NRQL query: SELECT latest(timestamp) FROM K8sApiServerSample, K8sEtcdSample, K8sSchedulerSample, K8sControllerManagerSample FACET entityName where clusterName = '_MY_CLUSTER_NAME_' Copy Tip If you still can't see Control Plane data, try the solution described in Kubernetes integration troubleshooting: Not seeing data.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 227.60461, + "_score": 218.4278, "_version": null, "_explanation": null, "sort": null, "highlight": { + "sections": "Static endpoints", "tags": "Kubernetes integration", - "body": " in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. Upgrade" + "body": " and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master="" or kubernetes.io/role="master". The control plane components should have either the k8s-app or the tier and component labels.
Refer to the following table for accepted label combinations" }, - "id": "61fd193d196a672daae826d6" + "id": "61fd18e9e7b9d2b5cc5e7358" } ], "/google-kubernetes-engine/7ecb459a-eb29-4346-a6f0-c9fe1acf8830": [ @@ -61197,7 +61150,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 279.59314, + "_score": 259.9965, "_version": null, "_explanation": null, "sort": null, @@ -61209,54 +61162,6 @@ }, "id": "617d5841196a67bb40f7c1de" }, - { - "sections": [ - "Link your applications to Kubernetes", - "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" - ], - "title": "Link your applications to Kubernetes", - "type": "docs", - "tags": [ - "Link apps and services", - "Kubernetes integration", - "Integrations" - ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", - "document_type": "page", - "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). 
Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link OpenShift and Kubernetes, you must enable mutating admission webhooks, which requires OpenShift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in OpenShift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the OpenShift services on the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled. Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels.
To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates, you need to disable the automatic installation of certificates when installing using Helm. To disable the installation of certificates, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certificate Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl x509 -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help. Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables.
Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue; therefore, it is enough to run the job again using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step. If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors.
For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries in the logs, it means that the apiserver is not able to communicate with the webhook service; this could be due to networking rules or security groups rejecting the communication. To check whether the apiserver is unable to communicate with the webhook, inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by executing the following command in a terminal window, and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster; this will make the apiserver try to communicate with the webhook. The following command will create a busybox pod. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors. grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster.
If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 253.35675, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Link your applications to Kubernetes", - "sections": "Link your applications to Kubernetes", - "tags": "Kubernetes integration", - "body": " the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips" - }, - "id": "617daead28ccbc662b7ffe23" - }, { "sections": [ "Link your applications to Kubernetes", @@ -61299,7 +61204,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 250.06598, + "_score": 245.19565, "_version": null, "_explanation": null, "sort": null, @@ -61311,57 +61216,6 @@ }, "id": "61fd3c9d196a675ff3e80980" }, - { - "sections": [ - "Kubernetes integration: what's changed in v3", - "v3 BETA", - "Integration version and Chart version", - "Overview", - "Architectural changes", - "Kube-state-metrics component", - "Kubelet component", - "Control plane component", - "Helm Charts", - "Migration Guide", - "KSM configuration", - "Tip", - "Control plane configuration", - "Agent configuration", - "Integrations configuration", - "Miscellaneous chart values", - "Upgrade from v2", - "Important" - ], - "title": "Kubernetes integration: what's changed in v3", - "type": "docs", - "tags": [ - "Changelog", - "Kubernetes integration", - "Integrations" - ], - "external_id": "a78ca20934f98fd2b43c7a9fbc2453c802c24ab8", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/get-started/changes-since-v3/", - "published_at": "2022-02-15T19:13:49Z", - "updated_at": "2022-02-13T15:01:38Z", - "document_type": "page", - "popularity": 1, - "body": "v3 BETA Kubernetes integration v3 is currently in a late stage beta, and we expect to make it generally available during the second quarter of 2022. We encourage you to give it a try and let us know what you think! Integration version and Chart version The Kubernetes integration v3 (appVersion) is included on the nri-bundle chart version 4. 
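If you use Helm, you can check which integration version (APP VERSION) ships with each chart release by listing the available chart versions (this assumes the newrelic repository has been added locally): helm search repo newrelic/nri-bundle --versions Copy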
Overview v3 BETA Data reported by the Kubernetes Integration version 3 has not changed with respect to version 2. For this major release, we focused on configurability, stability, and user experience. From version 3 onwards, New Relic's Kubernetes solution features a new architecture which aims to be more modular and configurable, giving you more power to choose how the solution is deployed and making it compatible with more environments. Architectural changes In this new version, the main component of the integration, the newrelic-infrastructure DaemonSet, is divided into three different components: nrk8s-ksm, nrk8s-kubelet, and nrk8s-controlplane, with the first being a deployment and the next two being DaemonSets. This makes it easier to make decisions at scheduling and deployment time, rather than runtime. Moreover, we also changed the lifecycle of the scraping process. We went from a one-shot, short-lived process, to a long-lived one, allowing it to leverage higher-level Kubernetes APIs like the Kubernetes informers, which provide built-in caching and watching of cluster objects. For this reason, each of the components has two containers: A container for the integration, responsible for collecting metrics. A container with the New Relic Infrastructure Agent, which is used to send the metrics to the New Relic Platform. Kube-state-metrics component We build our cluster state metrics on top of the OSS project kube-state-metrics, which is housed under the Kubernetes organization itself. Previously, as our solution comprised just one DaemonSet, an election process took place to decide which pod would be in charge of scraping the metrics. This process was based merely on locality: the pod in charge would be the one sharing a node with the KSM deployment. As the KSM output contains data for the whole cluster, parsing this output requires a substantial amount of resources. While this is something that big cluster operators can absorb, the fact that one arbitrary instance of the DaemonSet is the one that needs this large amount of resources forces cluster operators to allow such consumption for the whole DaemonSet, when only one pod actually needs it. Another problem with KSM scraping was figuring out on which node the KSM pod lived. To do this, we need to contact the API Server and filter pods by some labels, but given the short-lived nature of the integration, caches and watchers were not being used effectively by it. As a result, on large clusters, all instances of the DaemonSet flooded the control plane with non-namespaced pod list requests in an attempt to figure out whether the KSM pod was living next to them. We decided to tackle this problem by making two big changes to how KSM is scraped: Split the responsibility of scraping KSM out of the DaemonSet pods to a different, single instance Deployment. Refactor the code and make it long-running, so we can leverage Kubernetes informers which provide built-in caching and watching mechanisms. Thus, a specific Deployment nrk8s-ksm now takes care of finding KSM and scraping it. With this pod now being long-lived and single, it can safely use an endpoints informer to locate the IP of the KSM pod and scrape it. The informer will automatically cache the list of endpoints in the cluster locally and watch for new ones, avoiding storming the API Server with requests to figure out where the pod was located. While a sharded KSM setup is not supported yet, this new code was built with this future improvement in mind.
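You can see this split in a running cluster by listing the integration workloads (a quick check; the newrelic namespace is an assumption, adjust it to your installation): kubectl get deployments,daemonsets -n newrelic | grep nrk8s Copy This should show the nrk8s-ksm Deployment alongside the nrk8s-kubelet and nrk8s-controlplane DaemonSets described below.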
Kubelet component The Kubelet is the “Kubernetes agent”, a service that runs on every Kubernetes node and is responsible for creating the containers as instructed by the control plane. Since it's the Kubelet that partners closely with the container runtime, it's the main source of infrastructure metrics for our integration, such as use of CPU, memory, disk, network, etc. Although not thoroughly documented, the Kubelet API is the de-facto standard source for most Kubernetes metrics. Scraping the Kubelet is typically a low-resource operation. Given this, and our intent to minimize internode traffic whenever possible, nrk8s-kubelet is run as a DaemonSet where each instance gathers metrics from the Kubelet running on the same node. nrk8s-kubelet no longer requires hostNetwork to run properly, and instead it connects to the Kubelet using the Node IP. If this process fails, nrk8s-kubelet will fall back to reaching the node through the API Server proxy. This fallback mechanism is not new, but we do encourage you to mind this if you have very large clusters, as proxying many kubelets might increase the load on the API server. You can check if the API Server is being used as a proxy by looking for a message like this in the logs: Trying to connect to kubelet through API proxy Copy Control plane component Enabling the integration to successfully find and connect to CP components was probably one of the hardest parts of this effort. The main reason for this is the number of ways in which CP components can be configured: inside or outside the cluster, with one or many replicas, with or without dedicated nodes, etc. Moreover, different CP components might be configured differently. We built the current approach with the following scenarios in mind: CP monitoring should work out of the box for those environments in which the CP is reachable out of the box, e.g. Kubeadm or even Minikube. For setups where the CP cannot be autodiscovered, for example if it lives outside the cluster, we should provide a way for the user to specify their own endpoints. Failure to autodiscover shouldn't cause the deployment to fail, but failure to hit a manually defined endpoint should. As major Kubernetes distributions such as Kubeadm deploy CP components configured to listen only on localhost on the host's network namespace, we chose to deploy nrk8s-controlplane as a DaemonSet with hostNetwork: true. We structured the configuration to support autodiscover and static endpoints. To be compatible with a wide range of distributions out of the box, we provide a wide range of known defaults as configuration entries. Doing this in the configuration instead of the code allows you to tweak autodiscovery to your needs. Another improvement was adding the possibility of having multiple endpoints per selector and adding a probe mechanism which automatically detects the correct one. This allows you to try different configurations such as ports or protocols by using the same selector. Scraping configuration for the etcd CP component looks like the following, where the same structure and features apply to all components: config: etcd: enabled: true autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy If staticEndpoint is set, the component will try to scrape it.
If it can't hit the endpoint, the integration will fail, so there are no silent errors when manual endpoints are configured. If staticEndpoint is not set, the component will iterate over the autodiscover entries looking for the first pod that matches the selector in the specified namespace, and, optionally, is running on the same node as the DaemonSet (if matchNode is set to true). After a pod is discovered, the component probes the listed endpoints in order by issuing an HTTP HEAD request, and scrapes the first successfully probed one using the selected authorization type. While above we show a config excerpt for the etcd component, the scraping logic is the same for other components. For more detailed instructions on how to configure control plane monitoring, please check the control plane monitoring page. Helm Charts Helm is the primary means we offer to deploy our solution into your clusters. Chart complexity was also significantly increased from the previous version, where it only had to manage one DaemonSet. Now, it has to manage one deployment and two DaemonSets, each with slightly different configurations. This will give you more flexibility to adapt the solution to your needs, without the need to apply manual patches on top of the chart and the generated manifests. Some of the new features that our new Helm chart exposes are: Full control of the securityContext for all pods Full control of pod labels and annotations for all pods Ability to add extra environment variables, volumes, and volumeMounts Full control of the integration configuration, including which endpoints are reached, autodiscovery behavior, and scraping intervals Better alignment with Helm idioms and standards You can check full details on all the switches that can be flipped in the Chart's README.md. Migration Guide In order to make migration from earlier versions as easy as possible, we developed a compatibility layer that will translate most of the options that could be specified in the old newrelic-infrastructure chart to their new counterparts. This compatibility layer is temporary and will be removed in the future, so we encourage you to read this guide carefully and migrate the configuration with human supervision. KSM configuration Tip KSM monitoring works out of the box for most configurations; most users will not need to change this config. disableKubeStateMetrics has been replaced by ksm.enabled. The default is still the same (KSM scraping enabled). kubeStateMetricsScheme, kubeStateMetricsPort, kubeStateMetricsUrl, kubeStateMetricsPodLabel, and kubeStateMetricsNamespace have been replaced by the more comprehensive and flexible ksm.config. The ksm.config object has the following structure: ksm: config: # When autodiscovering KSM, force the following scheme. By default, `http` is used. scheme: \"http\" # Label selector to find kube-state-metrics endpoints. Defaults to `app.kubernetes.io/name=kube-state-metrics`. selector: \"app.kubernetes.io/name=kube-state-metrics\" # Restrict KSM discovery to this particular namespace. Defaults to all namespaces. namespace: \"\" # When autodiscovering, only consider endpoints that use this port. By default, all ports from the discovered `endpoint` are probed. #port: 8080 # Override autodiscovery mechanism completely and specify the KSM url directly instead #staticUrl: \"http://test.io:8080/metrics\" Copy
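As an illustration, a pair of old flags such as kubeStateMetricsNamespace and kubeStateMetricsPort could be migrated to their ksm.config counterparts from the command line (a sketch; the newrelic-infrastructure. prefix assumes you configure the integration through the nri-bundle chart, drop it if you use the newrelic-infrastructure chart directly): bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic \\ > -f values-newrelic.yaml \\ > --set newrelic-infrastructure.ksm.config.namespace=kube-system \\ > --set newrelic-infrastructure.ksm.config.port=8080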
Control plane configuration Control plane configuration has changed substantially. If you previously had control plane monitoring enabled, we encourage you to take a look at the Configure control plane monitoring dedicated page. The following options have been replaced by more comprehensive configuration, covered in the section linked above: apiServerSecurePort etcdTlsSecretName etcdTlsSecretNamespace controllerManagerEndpointUrl, etcdEndpointUrl, apiServerEndpointUrl, and schedulerEndpointUrl Agent configuration The agent config file, previously specified in config, has been moved to common.agentConfig. The format of the file has not changed, and the full range of options that can be configured can be found here. The following agent options were previously \"aliased\" in the root of the values.yml file, and are no longer available: logFile has been replaced by common.agentConfig.log_file. eventQueueDepth has been replaced by common.agentConfig.event_queue_depth. customAttributes has changed in format to a yaml object. The previous format, a manually JSON-encoded string e.g. {\"team\": \"devops\"}, is still accepted although discouraged. Previously, customAttributes had a default clusterName entry which might have unwanted consequences if removed. This is no longer the case; users may now safely override customAttributes in its entirety. discoveryCacheTTL has been completely removed, as the discovery is now performed using Kubernetes informers which have a built-in cache. Integrations configuration Integrations were previously configured under integrations_config, using an array format: integrations_config: - name: nri-redis.yaml data: discovery: # ... integrations: # ... Copy The mechanism remains the same, but we have changed the format to be more user-friendly: integrations: nri-redis-sampleapp: discovery: # ... integrations: # ... Copy Moreover, now the --port and --tls flags are mandatory on the discovery command. In the past, the following would work: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes Copy From v3 onwards, you must specify --port and --tls: integrations: nri-redis-sampleapp: discovery: command: exec: /var/db/newrelic-infra/nri-discovery-kubernetes --tls --port 10250 Copy This change is required because in v2 and below, the nrk8s-kubelet component (or its equivalent) ran with hostNetwork: true, so nri-discovery-kubernetes could connect to the kubelet using localhost and plain http. For security reasons, this is no longer the case, hence the need to specify both flags from now on. For more details on how to configure on-host integrations in Kubernetes, please check the Monitor services in Kubernetes page. Miscellaneous chart values While not related to the integration configuration, the following miscellaneous options for the helm chart have also changed: runAsUser has been replaced by securityContext, which is templated directly into the pods and more configurable. resources has been removed, as now we deploy three different workloads. Resources for each one can be configured individually under: ksm.resources kubelet.resources controlPlane.resources Similarly, tolerations has been split into three and the previous one is no longer valid: ksm.tolerations kubelet.tolerations controlPlane.tolerations All three default to tolerate any value for NoSchedule and NoExecute. image and all its subkeys have been replaced by individual sections for each of the three images that are now deployed: images.forwarder.* to configure the infrastructure-agent forwarder. images.agent.* to configure the image bundling the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data.
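For example, to give only the KSM scraper a memory limit under the new per-component layout (a sketch; the value is arbitrary and the newrelic-infrastructure. prefix assumes the nri-bundle chart): bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic \\ > -f values-newrelic.yaml \\ > --set newrelic-infrastructure.ksm.resources.limits.memory=512Mi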
Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values-newrelic.yaml file with your desired License Key and configuration. If you had previously installed our chart from the CLI directly, for example using a command like the following: bash Copy $ helm install newrelic/nri-bundle \\ > --set global.licenseKey= \\ > --set global.cluster= \\ > --set infrastructure.enabled=true \\ > --set prometheus.enabled=true \\ > --set webhook.enabled=true \\ > --set ksm.enabled=true \\ > --set kubeEvents.enabled=true \\ > --set logging.enabled=true You can take the provided --set arguments and put them in a yaml file like the following: # values-newrelic.yaml global: licenseKey: cluster: infrastructure: enabled: true prometheus: enabled: true webhook: enabled: true ksm: enabled: true kubeEvents: enabled: true logging: enabled: true Copy After doing this, and adapting any other setting you might have changed according to the section above, you can upgrade by running the following command: bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic --create-namespace \\ > -f values-newrelic.yaml \\ > --devel The --devel flag will instruct helm to download the v3 version of the integration (version 4.x of the nri-bundle chart). Important The --reuse-values flag is not supported for upgrading from v2 to v3.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 229.22153, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Kubernetes integration: what's changed in v3", - "sections": "Kubernetes integration: what's changed in v3", - "tags": "Kubernetes integration", - "body": " the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values" - }, - "id": "61fd3c9d28ccbc72eec0dcda" - }, { "sections": [ "New Relic Metrics Adapter", @@ -61395,7 +61249,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 227.97115, + "_score": 223.34859, "_version": null, "_explanation": null, "sort": null, @@ -61404,6 +61258,104 @@ "body": " in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration.
Upgrade" }, "id": "61fd193d196a672daae826d6" + }, + { + "sections": [ + "Kubernetes integration: Predefined alert policy", + "Predefined alert conditions", + "Container CPU usage % is too high", + "Container memory usage % is too high", + "Pod was unable to be scheduled", + "Pod is not ready", + "Container is running out of space", + "ReplicaSet doesn't have desired amount of pods", + "etcd open file descriptors", + "Create new alert conditions" + ], + "title": "Kubernetes integration: Predefined alert policy", + "type": "docs", + "tags": [ + "Installation", + "Kubernetes integration", + "Integrations" + ], + "external_id": "7c92831c394c4c087bad8b481250e55557e4b794", + "image": "", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/kubernetes-integration-predefined-alert-policy/", + "published_at": "2022-02-15T19:17:51Z", + "updated_at": "2022-02-04T14:47:09Z", + "document_type": "page", + "popularity": 1, + "body": "When deploying the New Relic Kubernetes integration for the first time in an account, we deploy a default set of alert conditions to your account. The predefined alert policy, named Kubernetes default alert policy, doesn't have a notification channel by default to avoid unwanted notifications. The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT value (cpuUsedCores/cpuLimitCores)*100 Warning threshold > 90% for at least 5 minutes Critical threshold > 95% for at least 5 mins Container memory usage % is too high Setting Value Event type K8sContainerSample SELECT value memoryWorkingSetUtilization Warning threshold > 85% for at least 5 minutes Critical threshold > 95% for at least 5 mins Pod was unable to be scheduled Setting Value Event type K8sPodSample SELECT value isScheduled Warning threshold Critical threshold isScheduled = 0 for at least 7 minutes Pod is not ready Setting Value Event type K8sPodSample SELECT value isReady Warning threshold Critical threshold isReady = 0 for at least 10 minutes Container is running out of space Setting Value Event type K8sContainerSample SELECT value fsUsedPercent Warning threshold > 75% for at least 5 minutes Critical threshold > 90% for at least 5 minutes ReplicaSet doesn't have desired amount of pods Setting Value Event type K8sReplicaSetSample SELECT value podsDesired - podsReady Warning threshold Critical threshold 0 for at least 5 minutes etcd open file descriptors Setting Value Event type K8sEtcdSample SELECT value (processOpenFds/processMaxFds)*100 Warning threshold > 75% for at least 3 minutes Critical threshold > 90% for at least 5 minutes Create new alert conditions To create new alert conditions based on Kubernetes metric data, see Understand and use data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 219.00739, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Kubernetes integration: Predefined alert policy", + "sections": "Kubernetes integration: Predefined alert policy", + "tags": "Kubernetes integration", + "body": ". The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. 
Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT" + }, + "id": "61fd3c6de7b9d279f45e6625" + }, + { + "sections": [ + "Configure control plane monitoring", + "Tip", + "Features", + "Compatibility and requirements", + "Control plane component", + "Autodiscovery and default configuration", + "hostNetwork and privileged", + "Custom autodiscovery", + "mTLS", + "Static endpoints", + "Limitations", + "Important", + "Control plane monitoring for managed and cloud environments", + "Monitoring control plane with integration version 2", + "Control plane monitoring on Integration version 2", + "Discovery of master nodes and control plane components", + "Configuration", + "etcd", + "API server", + "OpenShift configuration", + "OpenShift configuration on Integration version 2", + "Set up mTLS for etcd in OpenShift", + "See your data" + ], + "title": "Configure control plane monitoring", + "type": "docs", + "tags": [ + "Installation", + "Kubernetes integration", + "Integrations" + ], + "external_id": "33b7b8ae3dab9a2ca553dcf8ea0c97499478a85a", + "image": "https://docs.newrelic.com/static/209f301630c770f87ea8cbb1cace8e6e/8c557/new-relic-one-k8s-cluster-explorer-control-plane-parameters.png", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/configure-control-plane-monitoring/", + "published_at": "2022-02-15T19:15:25Z", + "updated_at": "2022-02-04T12:15:37Z", + "document_type": "page", + "popularity": 1, + "body": "New Relic provides control plane support for your Kubernetes integration, allowing you to monitor and collect metrics from your cluster's control plane components. That data can then be found in New Relic and used to create queries and charts. Tip Unless otherwise specified, this page refers to the Kubernetes integration v3. Details on how to configure control plane monitoring for v2 can be found in a specific section below. Features We monitor and collect metrics from the following control plane components: etcd: leader information, resident memory size, number of OS threads, consensus proposals data, etc. For a list of supported metrics, see etcd data. API server: rate of apiserver requests, breakdown of apiserver requests by HTTP method and response code, etc. For the complete list of supported metrics, see API server data. Scheduler: requested CPU/memory vs available on the node, tolerations to taints, any set affinity or anti-affinity, etc. For the complete list of supported metrics, see Scheduler data. Controller manager: resident memory size, number of OS threads created, goroutines currently existing, etc. For the complete list of supported metrics, see Controller manager data. Compatibility and requirements Control plane monitoring support is limited for managed clusters. This is because most cloud providers do not expose the metrics endpoints for the control plane components, so New Relic cannot access them. When deploying the solution in unprivileged mode, control plane setup will require extra steps and some caveats might apply. OpenShift 4.x uses control plane component metric endpoints that are different than the default. Control plane component The task of monitoring the Kubernetes control plane is a responsibility of the nrk8s-controlplane component, which by default is deployed as a DaemonSet. 
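To see where these pods are scheduled in your cluster, you can list them together with their node assignments (a quick check; the grep pattern matches the component name used by v3): kubectl get pods -A -o wide | grep nrk8s-controlplane Copy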
This component is automatically deployed to master nodes, through the use of a default list of nodeSelectorTerms which includes labels commonly used to identify master nodes, such as node-role.kubernetes.io/control-plane or node-role.kubernetes.io/master. Regardless, this selector is exposed in the values.yml file and therefore can be reconfigured to fit other environments. Clusters that do not have any node matching these selectors will not get any pod scheduled, thus not wasting any resources and being functionally equivalent to disabling control plane monitoring altogether by setting controlPlane.enabled to false in the Helm Chart. Each component of the control plane has a dedicated section, which allows you to individually: Enable or disable monitoring of that component Define specific selectors and namespaces for discovering that component Define the endpoints and paths that will be used to fetch metrics for that component Define the authentication mechanisms that need to be used to get metrics for that component Manually specify endpoints that skip autodiscovery completely Autodiscovery and default configuration By default, our Helm Chart ships a configuration that should work out of the box for some control plane components for on-premise distributions that run the control plane inside the cluster, such as Kubeadm or minikube. hostNetwork and privileged Most users and Kubernetes distributions configure the control plane metrics endpoints to listen only on the loopback interface, i.e. localhost. For this reason, the control plane component is deployed with hostNetwork: true by default when privileged is set to true (the default). When the integration is deployed using privileged: false, the hostNetwork setting for the control plane component will also be set to false. We chose to do it this way because otherwise, we would not be honoring the intent users have when they set privileged: false. Unfortunately, deploying without hostNetwork will cause control plane scraping to fail in most environments, which will result in missing metrics or the nrk8s-controlplane pods getting stuck in a CrashLoopBackoff state. This is a limitation of Kubernetes itself, as the control plane cannot be monitored without hostNetwork unless components are manually configured to do so. However, it is common to deploy the integration in unprivileged mode (privileged: false) while still considering it acceptable to run the control plane pods with hostNetwork. This can be achieved by setting controlPlane.unprivilegedHostNetwork to true: this will tell the chart to deploy the control plane component with hostNetwork: true, despite the value of the higher-level privileged flag. If running pods with hostNetwork is not acceptable whatsoever, due to cluster or other policies, control plane monitoring is not possible and should be disabled by setting controlPlane.enabled to false.
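To flip controlPlane.unprivilegedHostNetwork on an existing install from the command line, a values override is enough (a sketch; values-newrelic.yaml stands for whatever values file you already use, and the newrelic-infrastructure. prefix assumes the nri-bundle chart): bash Copy $ helm upgrade newrelic newrelic/nri-bundle \\ > --namespace newrelic \\ > -f values-newrelic.yaml \\ > --set newrelic-infrastructure.controlPlane.unprivilegedHostNetwork=true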
Custom autodiscovery Selectors used for autodiscovery are completely exposed as configuration entries in the values.yaml file, which means they can be tweaked or replaced to fit almost any environment where the control plane is run as a part of the cluster. An autodiscovery section looks like the following: autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system # Set to true to consider only pods sharing the node with the scraper pod. # This should be set to `true` if Kind is Daemonset, `false` otherwise. matchNode: true # Try to reach etcd using the following endpoints. endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 - selector: \"k8s-app=etcd-manager-main\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer Copy The autodiscover section contains a list of autodiscovery entries. Each entry has: selector: A string-encoded label selector that will be used to look for pods. matchNode: If set to true, it will additionally limit discovery to pods running on the same node as the particular instance of the DaemonSet performing discovery. endpoints: A list of endpoints to try if a pod is found for the specified selector. Additionally, each endpoint has: url: URL to target, including scheme. Can be http or https. insecureSkipVerify: If set to true, the certificate will not be checked for https URLs. auth.type: Which mechanism to use to authenticate the request. Currently, the following methods are supported: None: If auth is not specified, the request will not contain any authentication whatsoever. bearer: The same bearer token used to authenticate against the Kubernetes API will be sent to this request. mtls: mTLS will be used to perform the request. mTLS For the mtls type, the following needs to be specified: endpoints: - url: https://localhost:4001 auth: type: mtls mtls: secretName: secret-name secretNamespace: secret-namespace Copy Where secret-name is the name of a Kubernetes TLS Secret, which lives in secret-namespace, and contains the certificate, key, and CA required to connect to that particular endpoint. The integration fetches this secret at runtime rather than mounting it, which means it requires an RBAC role granting it access to it. Our Helm Chart automatically detects auth.mtls entries at render time and will automatically create entries for these particular secrets and namespaces for you, unless rbac.create is set to false. Our integration accepts a secret with the following keys: cert: The PEM-encoded certificate that will be presented to etcd key: The PEM-encoded private key corresponding to the certificate above cacert: The PEM-encoded root CA used to verify the etcd server certificate These certificates should be signed by the same CA etcd is using to operate. How to generate these certificates is out of the scope of this documentation, as it will vary greatly between different Kubernetes distributions. Please refer to your distribution's documentation to see how to fetch the required etcd peer certificates. In Kubeadm, for example, they can be found in /etc/kubernetes/pki/etcd/peer.{crt,key} on the master node. Once you have located or generated the etcd peer certificates, rename the files to match the keys we expect to be present in the secret, and create the secret in the cluster: bash Copy $ mv peer.crt cert $ mv peer.key key $ mv ca.crt cacert $ $ kubectl -n newrelic create secret generic newrelic-etcd-tls-secret --from-file=./cert --from-file=./key --from-file=./cacert Finally, you can input the secret name (newrelic-etcd-tls-secret) and namespace (newrelic) in the config snippet shown at the beginning of this section. Remember that the Helm Chart will automatically parse this config and create an RBAC role to grant access to this specific secret and namespace for the nrk8s-controlplane component, so there's no manual action needed in that regard.
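Before wiring the secret into the config, you can sanity-check that it contains the expected cert, key, and cacert entries (the names below match the example above): kubectl -n newrelic describe secret newrelic-etcd-tls-secret Copy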
Static endpoints While autodiscovery should cover cases where the control plane lives inside the Kubernetes cluster, some distributions or sophisticated Kubernetes environments run the control plane elsewhere, for a variety of reasons including availability or resource isolation. For these cases, the integration can be configured to scrape an arbitrary, fixed URL regardless of whether a pod with a control plane label is found on the node. This is done by specifying a staticEndpoint entry. For example, one for an external etcd instance would look like this: controlPlane: etcd: staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy staticEndpoint is the same type of entry as endpoints in the autodiscover entry, whose fields are described above. The same authentication mechanisms and schemes are supported here. Please keep in mind that if staticEndpoint is set, the autodiscover section will be ignored in its entirety. Limitations Important If you are using staticEndpoint pointing to an out-of-node (i.e. not localhost) endpoint, you must change controlPlane.kind from DaemonSet to Deployment. When using staticEndpoint, all nrk8s-controlplane pods will attempt to reach and scrape said endpoint. This means that, if nrk8s-controlplane is a DaemonSet (the default), all instances of the DaemonSet will scrape this endpoint. While this is fine if you are pointing them to localhost, if the endpoint is not local to the node you could potentially produce duplicate metrics and increased billable usage. If you are using staticEndpoint and pointing it to a non-local URL, make sure to change controlPlane.kind to Deployment. For the same reason above, it is currently not possible to use autodiscovery for some control plane components, and a static endpoint for others. This is a known limitation we are working to address in future versions of the integration. Lastly, staticEndpoint only allows defining a single endpoint per component. This means that if you have multiple control plane shards on different hosts, it is currently not possible to point to them separately. This is also a known limitation we are working to address in future versions. For the time being, a workaround could be to aggregate metrics for different shards elsewhere, and point the staticEndpoint URL to the aggregated output. Control plane monitoring for managed and cloud environments Some cloud environments, like EKS or GKE, allow retrieving metrics from the Kubernetes API Server. This can be easily configured as a static endpoint: controlPlane: affinity: nodeAffinity: false # https://github.com/helm/helm/issues/9136 kind: Deployment config: etcd: enabled: false scheduler: enabled: false controllerManager: enabled: false apiServer: staticEndpoint: url: \"https://kubernetes.default:443\" insecureSkipVerify: true auth: type: bearer Copy Please note that this only applies to the API Server and that etcd, the scheduler, and the controller manager remain inaccessible in cloud environments. Monitoring control plane with integration version 2 This section covers how to configure control plane monitoring on versions 2 and earlier of the integration. Please note that these versions had less flexible autodiscovery options, and did not support external endpoints. We strongly recommend you update to version 3 at your earliest convenience. See what's changed in the Kubernetes integration.
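After switching controlPlane.kind to Deployment, you can confirm the change took effect (the newrelic namespace is an assumption): kubectl get daemonsets,deployments -n newrelic | grep controlplane Copy Only one of the two resource types should list an nrk8s-controlplane workload.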
Control plane monitoring on Integration version 2 Discovery of master nodes and control plane components The Kubernetes integration relies on the kubeadm labeling conventions to discover the master nodes and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master=\"\" or kubernetes.io/role=\"master\". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations and values: Component Label Endpoint API server Kubeadm / Kops / ClusterAPI k8s-app=kube-apiserver tier=control-plane component=kube-apiserver OpenShift app=openshift-kube-apiserver apiserver=true localhost:443/metrics by default (can be configured); if the request fails, falls back to localhost:8080/metrics etcd Kubeadm / Kops / ClusterAPI k8s-app=etcd-manager-main tier=control-plane component=etcd OpenShift k8s-app=etcd localhost:4001/metrics Scheduler Kubeadm / Kops / ClusterAPI k8s-app=kube-scheduler tier=control-plane component=kube-scheduler OpenShift app=openshift-kube-scheduler scheduler=true localhost:10251/metrics Controller manager Kubeadm / Kops / ClusterAPI k8s-app=kube-controller-manager tier=control-plane component=kube-controller-manager OpenShift app=kube-controller-manager kube-controller-manager=true localhost:10252/metrics When the integration detects that it is running inside a master node, it tries to find which components are running on the node by looking for pods that match the labels listed in the table above. For every running component, the integration makes a request to its metrics endpoint. Configuration Control plane monitoring is automatic for agents running inside master nodes. The only component that requires an extra step to run is etcd, because it uses mutual TLS authentication (mTLS) for client requests. The API Server can also be configured to be queried using the Secure Port. Important Control plane monitoring for OpenShift 4.x requires additional configuration. For more information, see the OpenShift 4.x Configuration section. etcd In order to set mTLS for querying etcd, there are two configuration options that need to be set: Option Value ETCD_TLS_SECRET_NAME Name of a Kubernetes secret that contains the mTLS configuration. The secret should contain the following keys: cert: the certificate that identifies the client making the request. It should be signed by an etcd trusted CA. key: the private key used to generate the client certificate. cacert: the root CA used to identify the etcd server certificate. If the ETCD_TLS_SECRET_NAME option is not set, etcd metrics won't be fetched. ETCD_TLS_SECRET_NAMESPACE The namespace where the secret specified in the ETCD_TLS_SECRET_NAME was created. If not set, the default namespace is used. API server By default, the API server metrics are queried using the localhost:8080 unsecured endpoint. If this port is disabled, you can also query these metrics over the secure port. To enable this, set the following configuration option in the Kubernetes integration manifest file: Option Value API_SERVER_ENDPOINT_URL The (secure) URL to query the metrics. The API server uses localhost:443 by default Ensure that the ClusterRole has been updated to the newest version found in the manifest Added in version 1.15.0 Important Note that the port may differ depending on the secure port used by the API server.
For example, in Minikube the API server secure port is 8443, and therefore API_SERVER_ENDPOINT_URL should be set to https://localhost:8443. OpenShift configuration Version 3 of the Kubernetes Integration includes default settings that will autodiscover control plane components in OpenShift clusters, so it should work out of the box for all components except etcd. Etcd is not supported out of the box, as its metrics endpoint is configured to require mTLS authentication in OpenShift environments. Our integration supports mTLS authentication to fetch etcd metrics in this configuration; however, you will need to create the required mTLS certificate manually. This is necessary to avoid granting wide permissions to our integration without explicit approval from the user. To create an mTLS secret, please follow the steps in this section below, and then configure the integration to use the newly created secret as described in the mtls section. OpenShift configuration on Integration version 2 Important When installing on OpenShift through Helm, specify the configuration to automatically include these endpoints. Setting openshift.enabled=true and openshift.version=\"4.x\" will include the secure endpoints and enable the /var/run/crio.sock runtime. Control plane components on OpenShift 4.x use endpoint URLs that require SSL and service account based authentication. Therefore, the default endpoint URLs cannot be used. To configure control plane monitoring on OpenShift, uncomment the following environment variables in the customized manifest. URL values are pre-configured to the default base URLs for the control plane monitoring metrics endpoints in OpenShift 4.x. - name: \"SCHEDULER_ENDPOINT_URL\" value: \"https://localhost:10259\" - name: \"ETCD_ENDPOINT_URL\" value: \"https://localhost:9979\" - name: \"CONTROLLER_MANAGER_ENDPOINT_URL\" value: \"https://localhost:10257\" - name: \"API_SERVER_ENDPOINT_URL\" value: \"https://localhost:6443\" Copy Important Even though the custom ETCD_ENDPOINT_URL is defined, etcd requires HTTPS and mTLS authentication to be configured. For more on configuring mTLS for etcd in OpenShift, see Set up mTLS for etcd in OpenShift. Set up mTLS for etcd in OpenShift Follow these instructions to set up mutual TLS authentication for etcd in OpenShift 4.x: Export the etcd client certificates from the cluster to an opaque secret. In a default managed OpenShift cluster, the secret is named kube-etcd-client-certs and it is stored in the openshift-monitoring namespace. bash Copy $ kubectl get secret kube-etcd-client-certs -n openshift-monitoring -o yaml > etcd-secret.yaml Open the secret file and change the keys: Rename the certificate authority to cacert. Rename the client certificate to cert. Rename the client key to key. Optionally, change the secret name and namespace to something meaningful. Remove these unnecessary keys in the metadata section: creationTimestamp resourceVersion selfLink uid Install the manifest with its new name and namespace: bash Copy $ kubectl apply -n newrelic -f etcd-secret.yaml Configure the integration to use the newly created secret as described in the mtls section. See your data If the integration has been set up correctly, the Kubernetes cluster explorer contains all the control plane components and their status in a dedicated section, as shown below. one.newrelic.com > Kubernetes Cluster Explorer: Use the Kubernetes cluster explorer to monitor and collect metrics from your cluster's Control Plane components.
You can also check for control plane data with this NRQL query: SELECT latest(timestamp) FROM K8sApiServerSample, K8sEtcdSample, K8sSchedulerSample, K8sControllerManagerSample FACET entityName where clusterName = '_MY_CLUSTER_NAME_' Copy Tip If you still can't see Control Plane data, try the solution described in Kubernetes integration troubleshooting: Not seeing data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 218.7926, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Static endpoints", + "tags": "Kubernetes integration", + "body": " and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master="" or kubernetes.io/role="master". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations" + }, + "id": "61fd18e9e7b9d2b5cc5e7358" } ], "/memcached/b892f939-8c3b-4419-b041-639117b14874": [ @@ -61438,7 +61390,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59576, + "_score": 102.69989, "_version": null, "_explanation": null, "sort": null, @@ -61480,7 +61432,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75423, + "_score": 101.91952, "_version": null, "_explanation": null, "sort": null, @@ -61522,7 +61474,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.746796, + "_score": 101.91374, "_version": null, "_explanation": null, "sort": null, @@ -61564,7 +61516,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93259, + "_score": 98.52911, "_version": null, "_explanation": null, "sort": null, @@ -61607,7 +61559,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.1855, + "_score": 95.344696, "_version": null, "_explanation": null, "sort": null, @@ -61644,7 +61596,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 319.28305, + "_score": 296.58286, "_version": null, "_explanation": null, "sort": null, @@ -61749,7 +61701,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 314.08, + "_score": 292.2414, "_version": null, "_explanation": null, "sort": null, @@ -61778,7 +61730,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.55774, + "_score": 269.04272, "_version": null, "_explanation": null, "sort": null, @@ -61823,7 +61775,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 269.03418, + "_score": 253.98067, "_version": null, "_explanation": null, "sort": null, @@ -61868,7 +61820,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 262.44315, + "_score": 246.80772, "_version": null, "_explanation": null, "sort": null, @@ -61915,7 +61867,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 172.90567, + "_score": 169.79524, "_version": null, "_explanation": null, "sort": null, @@ -61947,7 +61899,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 145.39859, + "_score": 144.82233, "_version": null, 
"_explanation": null, "sort": null, @@ -62006,7 +61958,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 107.20784, + "_score": 100.97262, "_version": null, "_explanation": null, "sort": null, @@ -62047,7 +61999,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.16456, + "_score": 95.25579, "_version": null, "_explanation": null, "sort": null, @@ -62077,7 +62029,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 62.70721, + "_score": 62.228157, "_version": null, "_explanation": null, "sort": null, @@ -62121,7 +62073,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -62163,7 +62115,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -62205,7 +62157,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -62247,7 +62199,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -62289,7 +62241,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -62332,7 +62284,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 134.9674, + "_score": 127.507996, "_version": null, "_explanation": null, "sort": null, @@ -62366,7 +62318,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 123.15417, + "_score": 115.90346, "_version": null, "_explanation": null, "sort": null, @@ -62417,7 +62369,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 122.00985, + "_score": 113.42065, "_version": null, "_explanation": null, "sort": null, @@ -62458,7 +62410,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 96.05551, + "_score": 94.069824, "_version": null, "_explanation": null, "sort": null, @@ -62526,7 +62478,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 91.74544, + "_score": 86.06653, "_version": null, "_explanation": null, "sort": null, @@ -62569,7 +62521,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -62611,7 +62563,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -62653,7 +62605,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -62695,7 +62647,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, 
"_version": null, "_explanation": null, "sort": null, @@ -62737,7 +62689,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -62781,7 +62733,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -62823,7 +62775,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -62865,7 +62817,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -62907,7 +62859,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -62946,7 +62898,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -62990,7 +62942,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -63032,7 +62984,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -63074,7 +63026,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -63116,7 +63068,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -63155,7 +63107,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -63194,7 +63146,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.38777, "_version": null, "_explanation": null, "sort": null, @@ -63246,7 +63198,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 181.90442, + "_score": 171.44473, "_version": null, "_explanation": null, "sort": null, @@ -63292,7 +63244,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 168.5301, + "_score": 165.52116, "_version": null, "_explanation": null, "sort": null, @@ -63333,7 +63285,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 158.35973, + "_score": 157.97801, "_version": null, "_explanation": null, "sort": null, @@ -63345,52 +63297,44 @@ }, { "sections": [ - "NGINX monitoring integration", - "Important", - "Compatibility and requirements", - "Supported operating systems", - "Enabling your NGINX Server", - "Configure the integration", - "Install and activate the integration", - "Linux installation", - "Other environments", - "Amazon ECS installation", - 
"Kubernetes installation", - "nginx-config.yml sample files", - "Basic configuration", - "HTTP Basic authentication", - "Metrics with only one self-signed certificate", - "Environment variables replacement", - "Multi-instance monitoring", - "Find and use data" + "Install the infrastructure monitoring agent for macOS", + "Tip", + "Install for macOS", + "Limitations", + "Step-by-step instructions", + "Homebrew install", + "What's next?", + "Update the agent" ], - "title": "NGINX monitoring integration", + "title": "Install the infrastructure monitoring agent for macOS", "type": "docs", "tags": [ - "On-host integrations list", - "On-host integrations", - "Integrations" + "macOS installation", + "Install the infrastructure agent", + "Infrastructure" ], - "external_id": "411a85528ddc65a2f04f7c05659e4b6695c9400a", - "image": "https://docs.newrelic.com/static/6a347940f064146525be36b805414901/01e7c/kubernetes-k8.png", - "url": "https://docs.newrelic.com/docs/infrastructure/host-integrations/host-integrations-list/nginx/nginx-integration/", - "published_at": "2022-02-14T16:33:59Z", - "updated_at": "2022-02-14T16:33:59Z", + "external_id": "755028024e9e2757dd8441cccb2adb1fd3aefdac", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/macos-installation/install-infrastructure-monitoring-agent-macos/", + "published_at": "2022-02-14T09:29:30Z", + "updated_at": "2021-10-30T21:45:28Z", "document_type": "page", "popularity": 1, - "body": "Our NGINX integration collects and sends inventory and metrics from your NGINX server to our platform, where you can see data on connections and client requests so that you can find the source of any problems. To install the NGINX monitoring integration, you must run through the following steps: Enabling your NGINX Server. Configure the integration. Install and activate the integration. Find and use data. Optionally, see the advanced configuration settings. Important For best results, regularly update the integration package and the infrastructure agent. Compatibility and requirements Our integration is compatible with both NGINX Open Source and NGINX Plus. Before installing the integration, ensure you meet the following requirements: A New Relic account. Don't have one? Sign up for free! No credit card required. NGINX extension enabled, as described in the Configure the integration section. If NGINX is not running on Kubernetes or Amazon ECS, you must install the infrastructure agent on a Linux OS host that's running NGINX. Otherwise: If running on Kubernetes, see these requirements. If running on Amazon ECS, see these requirements. Supported operating systems Windows Linux MacOS For a comprehensive list of OS versions, check the table of compatible operating systems. Enabling your NGINX Server To capture data from the NGINX integration, you must first enable and configure the applicable extension module: For NGINX Open Source, see HTTP stub status module. For NGINX Plus, see HTTP status module and HTTP API module. Configure the integration There are several ways to configure the integration, depending on how it was installed: If it was enabled via Kubernetes, see Monitor services running on Kubernetes. If it was enabled via Amazon ECS, see Monitor services running on ECS. If it was installed on-host, edit the integration's nginx-config.yml configuration file. An integration's YAML-format configuration is where you can place required login credentials and configure how data is collected. 
The options you change depend on your setup and preference. The configuration file has common settings applicable to all integrations such as interval, timeout, or inventory_source. To read about these common settings, see configuration format. Important If you are still using our Legacy configuration and definition files, refer to this document for help. Specific settings related to NGINX are defined using the env section of the configuration file. These settings control the connection to your NGINX instance as well as other security settings and features. The list of valid settings is described in the next section of this document. Install and activate the integration To install the NGINX integration, follow the instructions for your environment: Linux installation Install the infrastructure agent, and use nri-nginx as filename. Change the directory to the integrations configuration folder by running: bash Copy $ cd /etc/newrelic-infra/integrations.d Copy the sample configuration file by running: bash Copy $ sudo cp nginx-config.yml.sample nginx-config.yml Edit the nginx-config.yml configuration file with your favorite editor. Check out some great configuration file examples.. Restart the infrastructure agent. See how to restart the infrastructure agent in different Linux environments. Other environments Amazon ECS installation See Monitor service running on ECS. Kubernetes installation See Monitor service running on Kubernetes. Additional notes: Advanced: Integrations are also available in tarball format to allow for install outside of a package manager. On-host integrations do not automatically update. For best results, regularly update the integration package and the infrastructure agent. nginx-config.yml sample files Basic configuration This is the very basic configuration to collect Metrics and Inventory from your localhost: integrations: - name: nri-nginx env: METRICS: \"true\" STATUS_URL: http://127.0.0.1/status STATUS_MODULE: discover REMOTE_MONITORING: true interval: 30s labels: env: production role: load_balancer - name: nri-nginx env: INVENTORY: \"true\" STATUS_URL: http://127.0.0.1/status CONFIG_PATH: /etc/nginx/nginx.conf REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/nginx Copy HTTP Basic authentication This configuration collects Metrics and Inventory from your localhost protected with basic authentication. 
Replace the username and password on the STATUS_URL with your credentials: integrations: - name: nri-nginx env: METRICS: \"true\" STATUS_URL: http://username:password@127.0.0.1/status STATUS_MODULE: discover REMOTE_MONITORING: true interval: 30s labels: env: production role: load_balancer - name: nri-nginx env: INVENTORY: \"true\" STATUS_URL: http://username:password@127.0.0.1/status CONFIG_PATH: /etc/nginx/nginx.conf REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/nginx Copy Metrics with only one self-signed certificate In this configuration we only have 1 integration block with METRICS: true to collect only metrics and added VALIDATE_CERTS: false to prevent validation of the server's SSL certificate when using a self-signed one: integrations: - name: nri-nginx env: METRICS: \"true\" STATUS_URL: https://my_nginx_host/status STATUS_MODULE: discover VALIDATE_CERTS: false REMOTE_MONITORING: true interval: 30s labels: env: production role: load_balancer Copy Environment variables replacement In this configuration we are using the environment variable NGINX_STATUS to populate the STATUS_URL setting of the integration: integrations: - name: nri-nginx env: METRICS: \"true\" STATUS_URL: {{NGINX_STATUS}} STATUS_MODULE: discover VALIDATE_CERTS: false REMOTE_MONITORING: true interval: 30s labels: env: production role: load_balancer Copy Multi-instance monitoring In this configuration we are monitoring multiple NGINX servers from the same integration. For the first instance (STATUS_URL: https://1st_nginx_host/status) we are collecting metrics and inventory while for the second instance (STATUS_URL: https://2nd_nginx_host/status) we will only collect metrics. integrations: - name: nri-nginx env: METRICS: \"true\" STATUS_URL: https://1st_nginx_host/status STATUS_MODULE: discover VALIDATE_CERTS: false REMOTE_MONITORING: true interval: 30s labels: env: production role: load_balancer - name: nri-nginx env: INVENTORY: \"true\" STATUS_URL: https://1st_nginx_host/status CONFIG_PATH: /etc/nginx/nginx.conf REMOTE_MONITORING: true interval: 60s labels: env: production role: load_balancer inventory_source: config/nginx - name: nri-nginx env: METRICS: \"true\" STATUS_URL: http://2nd_nginx_host/status STATUS_MODULE: discover VALIDATE_CERTS: false REMOTE_MONITORING: true interval: 30s labels: env: production role: load_balancer Copy Find and use data After you've configured and installed the integration, you can start monitoring: Data from this service, which is reported to an integration dashboard. Metrics that are attached to the NginxSample event type. You can query this data for troubleshooting purposes or to create custom charts and dashboards. For more on how to find and use your data, see: NGINX's advanced configuration Understand integration data", + "body": "With New Relic's infrastructure monitoring agent for macOS, you can monitor key performance metrics on macOS hosts. The agent can run on your own hardware or in cloud systems such as Amazon EC2. The infrastructure monitoring agent is compatible with all generally available, Apple supported macOS versions. Tip To use infrastructure monitoring and the rest of our observability platform, join the New Relic family! Sign up to create your free account in only a few seconds. Then ingest up to 100GB of data for free each month. Forever. Install for macOS Before installation, be sure to review the requirements. 
Then, to install the infrastructure monitoring agent for macOS, you can use our Guided Install, or follow the instructions in this document to complete a basic installation. Limitations The following capabilities of the infrastructure agent are not yet available on macOS: Log forwarder. On-host integrations (including built-in integrations such as Docker and Flex). Automated deployment via Configuration Management tools (Chef, Ansible, Puppet). Step-by-step instructions To install the infrastructure monitoring agent, follow the step-by-step instructions: Homebrew install Review the agent requirements and supported operating systems. Make sure Homebrew is installed in the system. You can check if Homebrew is installed with: which brew Copy If it's not installed, you can install it with this command (or check Homebrew up-to-date instructions): /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\" Copy Create the configuration file and add your license key: echo \"license_key: YOUR_LICENSE_KEY\" | sudo tee -a /usr/local/etc/newrelic-infra/newrelic-infra.yml Copy Then, open the terminal and run the following command: brew install newrelic/tap/newrelic-infra-agent -q Copy Start the infrastructure agent service: brew services start newrelic-infra-agent Copy Wait a few minutes, then view your server in the Infrastructure UI. If no data appears after waiting a few minutes, follow the troubleshooting steps. What's next? The only required configuration option is the license_key setting, which is created as part of the installation procedures. You may also want to: Add custom attributes to annotate your infrastructure data. Connect your AWS account if your servers are hosted on Amazon EC2. Add other New Relic infrastructure integrations to collect data from external services. Update the agent To upgrade to the latest version, follow standard procedures to update the infrastructure monitoring agent.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 148.20627, + "_score": 143.01776, "_version": null, "_explanation": null, "sort": null, "highlight": { - "sections": "Supported operating systems", - "body": " section. If NGINX is not running on Kubernetes or Amazon ECS, you must install the infrastructure agent on a Linux OS host that's running NGINX. Otherwise: If running on Kubernetes, see these requirements. If running on Amazon ECS, see these requirements. Supported operating systems Windows Linux MacOS" + "title": "Install the infrastructure monitoring agent for macOS", + "sections": "Install the infrastructure monitoring agent for macOS", + "tags": "macOS installation", + "body": "With New Relic's infrastructure monitoring agent for macOS, you can monitor key performance metrics on macOS hosts. The agent can run on your own hardware or in cloud systems such as Amazon EC2. 
The infrastructure monitoring agent is compatible with all generally available, Apple supported macOS" }, - "id": "61ac559d196a6703e2d1085f" + "id": "611fe66a28ccbc920ff9abfa" } ], "/kubernetes-plugin-for-logs/6868d952-7e27-4dbb-823b-1077f6258bbf": [ @@ -63435,7 +63379,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 340.8547, + "_score": 316.81882, "_version": null, "_explanation": null, "sort": null, @@ -63480,7 +63424,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 279.19852, + "_score": 273.60492, "_version": null, "_explanation": null, "sort": null, @@ -63491,54 +63435,6 @@ }, "id": "61fd193d196a672daae826d6" }, - { - "sections": [ - "Link your applications to Kubernetes", - "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" - ], - "title": "Link your applications to Kubernetes", - "type": "docs", - "tags": [ - "Link apps and services", - "Kubernetes integration", - "Integrations" - ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", - "document_type": "page", - "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. 
Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that inject the metadata is enabled. Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled= true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. 
To enable this feature, edit nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled= true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation for certificates just modify nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled= true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl x509 -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair are in another format, see the Digicert knowledgebase for more help. Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | td -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. 
Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x, The new version 1.3.2 fixes this issue, therefore it is enough to run again the job using an update version of the image to fix the issue. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used in order to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step. If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. 
For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries on the logs, it means that the apiserver is not being able to communicate with the webhook service, this could be due to networking rules or security groups rejecting the communication. To check if the apiserver is not being able to communicate with the webhook you should inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by the executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster, this will make the apiserver try to communicate with the webhook. The following command will create a busybox. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors. grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. 
If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 261.80432, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Link your applications to Kubernetes", - "sections": "Link your applications to Kubernetes", - "tags": "Kubernetes integration", - "body": " the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips" - }, - "id": "617daead28ccbc662b7ffe23" - }, { "sections": [ "Link your applications to Kubernetes", @@ -63581,7 +63477,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 233.32877, + "_score": 228.60892, "_version": null, "_explanation": null, "sort": null, @@ -63632,7 +63528,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 231.22183, + "_score": 219.8403, "_version": null, "_explanation": null, "sort": null, @@ -63643,6 +63539,49 @@ "body": " the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. 
Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values" }, "id": "61fd3c9d28ccbc72eec0dcda" + }, + { + "sections": [ + "Kubernetes integration: Predefined alert policy", + "Predefined alert conditions", + "Container CPU usage % is too high", + "Container memory usage % is too high", + "Pod was unable to be scheduled", + "Pod is not ready", + "Container is running out of space", + "ReplicaSet doesn't have desired amount of pods", + "etcd open file descriptors", + "Create new alert conditions" + ], + "title": "Kubernetes integration: Predefined alert policy", + "type": "docs", + "tags": [ + "Installation", + "Kubernetes integration", + "Integrations" + ], + "external_id": "7c92831c394c4c087bad8b481250e55557e4b794", + "image": "", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/kubernetes-integration-predefined-alert-policy/", + "published_at": "2022-02-15T19:17:51Z", + "updated_at": "2022-02-04T14:47:09Z", + "document_type": "page", + "popularity": 1, + "body": "When deploying the New Relic Kubernetes integration for the first time in an account, we deploy a default set of alert conditions to your account. The predefined alert policy, named Kubernetes default alert policy, doesn't have a notification channel by default to avoid unwanted notifications. The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT value (cpuUsedCores/cpuLimitCores)*100 Warning threshold > 90% for at least 5 minutes Critical threshold > 95% for at least 5 mins Container memory usage % is too high Setting Value Event type K8sContainerSample SELECT value memoryWorkingSetUtilization Warning threshold > 85% for at least 5 minutes Critical threshold > 95% for at least 5 mins Pod was unable to be scheduled Setting Value Event type K8sPodSample SELECT value isScheduled Warning threshold Critical threshold isScheduled = 0 for at least 7 minutes Pod is not ready Setting Value Event type K8sPodSample SELECT value isReady Warning threshold Critical threshold isReady = 0 for at least 10 minutes Container is running out of space Setting Value Event type K8sContainerSample SELECT value fsUsedPercent Warning threshold > 75% for at least 5 minutes Critical threshold > 90% for at least 5 minutes ReplicaSet doesn't have desired amount of pods Setting Value Event type K8sReplicaSetSample SELECT value podsDesired - podsReady Warning threshold Critical threshold 0 for at least 5 minutes etcd open file descriptors Setting Value Event type K8sEtcdSample SELECT value (processOpenFds/processMaxFds)*100 Warning threshold > 75% for at least 3 minutes Critical threshold > 90% for at least 5 minutes Create new alert conditions To create new alert conditions based on Kubernetes metric data, see Understand and use data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 219.00739, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Kubernetes integration: Predefined alert policy", + "sections": "Kubernetes integration: Predefined alert policy", + "tags": "Kubernetes integration", + "body": ". 
The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT" + }, + "id": "61fd3c6de7b9d279f45e6625" } ], "/silex/d154455e-ae50-4feb-a277-9b6ea0eb998e": [ @@ -63677,7 +63616,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -63719,7 +63658,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -63761,7 +63700,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -63803,7 +63742,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -63842,7 +63781,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -63879,7 +63818,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1259.9567, + "_score": 1262.2787, "_version": null, "_explanation": null, "sort": null, @@ -63898,7 +63837,7 @@ "Tip", "Important" ], - "published_at": "2022-02-15T01:41:58Z", + "published_at": "2022-02-16T01:44:29Z", "title": "Instrument your cluster", "updated_at": "2021-11-06T01:49:38Z", "type": "developer", @@ -63909,7 +63848,7 @@ "body": "lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. Each procedure in the lab builds upon the last, so make sure you've completed the last procedure, Explore your cluster, before starting this one. As the developer of TinyHat.me, you need to have visibility into your cluster. You need to know how healthy your application is. You need to know when things go wrong. But you've put it off for so long because instrumenting Kubernetes is hard and time-consuming. This is one of the things that makes Pixie so valuable. Pixie is a CNCF open source Kubernetes monitoring solution that provides: Automatic and instant baseline observability of your cluster Actionable, code-level insights of your applications With Pixie's auto-telemetry, you'll instrument your cluster in minutes to get dynamic data such as protocol traces, resource metrics, and app metrics from your cluster—all without an agent! Tip If you haven't signed into New Relic, do that now so you're ready to install Pixie into your cluster. Step 1 of 10 Open New Relic. On the right side of the upper navigation bar, click Add more data: Step 2 of 10 Click Guided install: This walks you through the installation process. Step 3 of 10 Click Kubernetes to let New Relic guide you through instrumenting your Kubernetes cluster: Step 4 of 10 Click Begin installation: Step 5 of 10 Select your account, name your cluster \"tiny-hat\", and click Continue: This specifies that TinyHat.me, and all its services, should live in a New Relic cluster called \"tiny-hat\" in the account you selected. 
Step 6 of 10 Leave the default choices on the next screen. These provide a range of observability features for your cluster, including our infrastructure agent which gives you a high level overview of the health of your cluster. Notably, the default options include \"Instant service-level insights, full-body requests, and application profiles through Pixie\" which you focus on in this lab. Click Continue: In the next screen, you see a command for installing our Kubernetes integration into your cluster. Step 7 of 10 Click Copy command: Now you're ready to install Pixie into your cluster. Step 8 of 10 Switch back to your terminal and paste the Helm command. Step 9 of 10 While you're installing Pixie, switch back to New Relic and click Continue to progress your guided installation to the final step. Here, you see a message that says \"Listening for data\": In a few minutes, Helm will have fully installed the necessary charts. You should see a message with the name, deployed date, namespace, and more: bash Copy NAME: newrelic-bundle LAST DEPLOYED: Thu Sep 23 13:50:24 2021 NAMESPACE: newrelic STATUS: deployed REVISION: 1 TEST SUITE: None Soon after that, the New Relic page updates to tell you that we're receiving data from your cluster: Step 10 of 10 Click Kubernetes cluster explorer to see your nodes, pods, deployments and a host of other data about your cluster, all in one view: Important You may see an error message, \"We're receiving incomplete data for this cluster.\" Please wait a few more minutes and refresh the page to see your cluster. In minutes, you were able to instrument your entire cluster without having to install language-specific agents or specify detailed cluster information! On top of all the data you see in the cluster explorer, click a pod or a node to dig deeper and see the granular data that the infrastructure agent was able to access near-instantly: lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. 
Now that you've instrumented your cluster, use Pixie to debug your application.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.33723, + "_score": 57.40007, "_version": null, "_explanation": null, "sort": null, @@ -63951,7 +63890,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -63993,7 +63932,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -64035,7 +63974,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -64077,7 +64016,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -64116,7 +64055,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -64160,7 +64099,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -64202,7 +64141,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -64244,7 +64183,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -64286,7 +64225,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -64325,7 +64264,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -64381,7 +64320,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.82205, + "_score": 146.50717, "_version": null, "_explanation": null, "sort": null, @@ -64392,6 +64331,54 @@ }, "id": "61ac554e28ccbc3ccdc24744" }, + { + "sections": [ + "PHP agent compatibility and requirements", + "PHP releases", + "Important", + "Permissions", + "License key", + "Processor type", + "Operating systems", + "Web servers", + "Frameworks", + "Databases and libraries", + "Other APM software", + "Instance details", + "Message queuing", + "Security requirements", + "Connect the agent to other New Relic features" + ], + "title": "PHP agent compatibility and requirements", + "type": "docs", + "tags": [ + "Getting started", + "PHP agent", + "Agents" + ], + "external_id": "768d979d777c65da2940c078e176de86904259a8", + "image": "", + "url": "https://docs.newrelic.com/docs/apm/agents/php-agent/getting-started/php-agent-compatibility-requirements/", + "published_at": "2022-02-14T16:37:25Z", + "updated_at": "2021-10-31T13:41:58Z", + "document_type": "page", + "popularity": 1, + "body": "Before you install New Relic for PHP, make sure your system meets the 
applicable requirements. If you don't have one already, create a New Relic account. It's free, forever. PHP releases New Relic supports PHP versions 5.5, 5.6, 7.0, 7.1, 7.2, 7.3, 7.4, and 8.0. Important Compatibility note: When PHP 8.0 detects the New Relic agent, it disables Just-In-Time compilation. We recommend using a supported release of PHP, especially 7.3, 7.4, and 8.0. PHP 5.1 support was deprecated in release 4.0 of the agent, and removed in release 4.5. PHP 5.2 support was deprecated in release 6.8 of the agent, and removed in release 7.0. PHP 5.3 and PHP 5.4 support was deprecated in release 9.15 of the agent, and removed in release 9.17. Permissions Installation: Root access is required for most installations. Running: Root access is not required. License key For any installation, you will need your New Relic license key. This is a 40-character hexadecimal string that New Relic provides when you sign up for your account. For more information, see the PHP install script and license key settings in the PHP INI file. Processor type Intel (and compatible) platforms only Support for SSE2 instructions is required Operating systems Important The Windows operating system is not supported. Operating system Supported by New Relic's PHP agent Linux (x86 and x86_64) AWS Linux 2 Red Hat Enterprise Linux (RHEL) 5 or higher CentOS 6 or higher Important As of January 2021, we will discontinue support for CentOS 5. For more information, see our PHP agent release notes v9.15.0 and our Explorers Hub post. Debian 7.0 (\"wheezy\") or higher Ubuntu LTS 14.04 (\"Trusty Tahr\") or higher or Ubuntu non-LTS 19.04 (\"Disco Dingo\") or higher. Any other Linux distribution with: Kernel version 2.6.13 or higher (2.6.26 or higher highly recommended) glibc 2.5 or higher with NPTL support; or musl libc version 1.1 or higher Linux (ARM64) AWS Linux 2 (including AWS Graviton 2) CentOS 8 Important ARM64 is only supported with PHP 8.0 and with New Relic PHP Agent 9.18.1 or later. For more information on Arm64 support, please see the ARM64 installation info. macOS (x86_64 only) Important As of January 2021, we will discontinue support for macOS. For more information, see our PHP agent release notes v9.15.0 and our Explorers Hub post. macOS 10.6 or higher. Because modern versions of macOS can't run 32-bit applications, New Relic removed support for 32-bit macOS with PHP agent release 4.6. FreeBSD (x64) The latest agent supports the latest Production Release. SmartOS and Solaris (x64) (DEPRECATED) Last supported PHP Agent for Solaris and SmartOS is 8.1 SmartOS: All versions OpenSolaris 10: snv_134b or higher Must use i86pc architecture. SPARC architectures are not supported. Web servers Apache 2.2 or 2.4 via mod_php Any web server that supports FastCGI using PHP-FPM Frameworks Supported PHP frameworks include: Frameworks CakePHP 2.x Magento 1.x and 2.x, CE and EE CodeIgniter 2.x MediaWiki Drupal 6.x, 7.x, 8.x, and 9.x Joomla 3.x Slim 2.x, 3.x, and 4.x Kohana 3.2 and 3.3 Symfony 3.x, 4.x, 5.x Laminas 3.x Wordpress Laravel 4.x, 5.x, 6.x, and 7.x Yii 1.x Laravel Lumen 6.x, 7.x, and 8.x Zend Framework 1.x, 2.x, and 3.x Important Joomla 3.x is not supported on PHP 8. Important As of PHP agent version 9.17, the following frameworks or framework versions are no longer supported and may be removed from future agent builds: Cake PHP 1.x Joomla 1.5, 1.6, and 2.x Kohana Silex 1.x and 2.x Symfony 1.x and 2.x The PHP agent's list of frameworks continues to grow. 
Even if the framework you are using is not listed here, the PHP agent may be able to provide you with useful information about your app. For more information, see PHP frameworks: Integrating support for New Relic. If you want to suggest support for other popular PHP frameworks, visit us at the Explorers Hub and create a Feature Idea! Databases and libraries Supported databases and libraries: Databases and Libraries Firebird Oracle Guzzle PHPUnit Informix PDO Memcached Postgres MongoDB Predis Microsoft SQL Server Redis MySQL SQLite ODBC Sybase Other APM software If your application uses other application performance monitoring (APM) software besides our agent, we cannot guarantee that our agent will work correctly and we cannot offer technical support. For more information, see Errors when using other monitoring software. Instance details New Relic collects instance details for a variety of databases and database drivers. The ability to view specific instances and the types of database information in APM depends on your New Relic agent version. New Relic's PHP agent version 6.8 or higher supports instance details for the following: Database Extension Minimum agent version MongoDB mongodb 7.1 MySQL mysql 6.8 MySQL mysqli 6.8 MySQL pdo_mysql 6.8 PostgreSQL pgsql 6.9 PostgreSQL pdo_pgsql 6.9 Redis predis 7.1 Redis redis 7.1 To disable collection of host information, use either of these options: Set newrelic.datastore_tracer.instance_reporting.enabled to false in the newrelic.ini. Omit the database name with newrelic.datastore_tracer.database_name_reporting.enabled = false. To request instance-level information from datastores currently not listed for your New Relic agent, get support at support.newrelic.com. Message queuing HTTP Laravel Queuing, available as an experimental feature in the PHP Agent 6.6.0.169 release, enabled by default since PHP Agent 8.0.0.204. Security requirements As a standard security measure for data collection, your app server must support SHA-2 (256-bit). SHA-1 is not supported. Connect the agent to other New Relic features The PHP agent integrates with other New Relic features to give you end-to-end visibility: Product Integration Browser monitoring The PHP agent automatically injects the browser agent's JS code when you enable auto-instrumentation. After enabling browser injection, you can view data in the APM Summary page and quickly switch between the APM and browser data for a particular app. For configuration options and manual instrumentation, see Browser monitoring and the PHP agent. Infrastructure monitoring When you install the infrastructure and APM agents on the same host, they automatically detect one another. You can then view a list of hosts in the APM UI, and filter your Infrastructure hosts by APM app in the Infrastructure UI. For more information, see APM data in infrastructure monitoring. New Relic Dashboards The PHP agent sends default events and attributes for NRQL queries. You can also record custom events for advanced analysis. 
Synthetic monitoring Synthetic transaction traces connect requests from synthetic monitors to the underlying APM transaction.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 116.77734, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "PHP agent compatibility and requirements", + "sections": "PHP agent compatibility and requirements", + "tags": "PHP agent", + "body": " is required Operating systems Important The Windows operating system is not supported. Operating system Supported by New Relic's PHP agent Linux (x86 and x86_64) AWS Linux 2 Red Hat Enterprise Linux (RHEL) 5 or higher CentOS 6 or higher Important As of January 2021, we will discontinue support" + }, + "id": "6174902e196a674f042f1856" + }, { "sections": [ "Apache monitoring integration", @@ -64442,7 +64429,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 119.81903, + "_score": 113.25148, "_version": null, "_explanation": null, "sort": null, @@ -64453,54 +64440,6 @@ }, "id": "6174ae5a64441f5baf5fc976" }, - { - "sections": [ - "PHP agent compatibility and requirements", - "PHP releases", - "Important", - "Permissions", - "License key", - "Processor type", - "Operating systems", - "Web servers", - "Frameworks", - "Databases and libraries", - "Other APM software", - "Instance details", - "Message queuing", - "Security requirements", - "Connect the agent to other New Relic features" - ], - "title": "PHP agent compatibility and requirements", - "type": "docs", - "tags": [ - "Getting started", - "PHP agent", - "Agents" - ], - "external_id": "768d979d777c65da2940c078e176de86904259a8", - "image": "", - "url": "https://docs.newrelic.com/docs/apm/agents/php-agent/getting-started/php-agent-compatibility-requirements/", - "published_at": "2022-02-14T16:37:25Z", - "updated_at": "2021-10-31T13:41:58Z", - "document_type": "page", - "popularity": 1, - "body": "Before you install New Relic for PHP, make sure your system meets the applicable requirements. If you don't have one already, create a New Relic account. It's free, forever. PHP releases New Relic supports PHP versions 5.5, 5.6, 7.0, 7.1, 7.2, 7.3, 7.4, and 8.0. Important Compatibility note: When PHP 8.0 detects the New Relic agent, it disables Just-In-Time compilation. We recommend using a supported release of PHP, especially 7.3, 7.4, and 8.0. PHP 5.1 support was deprecated in release 4.0 of the agent, and removed in release 4.5. PHP 5.2 support was deprecated in release 6.8 of the agent, and removed in release 7.0. PHP 5.3 and PHP 5.4 support was deprecated in release 9.15 of the agent, and removed in release 9.17. Permissions Installation: Root access is required for most installations. Running: Root access is not required. License key For any installation, you will need your New Relic license key. This is a 40-character hexadecimal string that New Relic provides when you sign up for your account. For more information, see the PHP install script and license key settings in the PHP INI file. Processor type Intel (and compatible) platforms only Support for SSE2 instructions is required Operating systems Important The Windows operating system is not supported. Operating system Supported by New Relic's PHP agent Linux (x86 and x86_64) AWS Linux 2 Red Hat Enterprise Linux (RHEL) 5 or higher CentOS 6 or higher Important As of January 2021, we will discontinue support for CentOS 5. 
For more information, see our PHP agent release notes v9.15.0 and our Explorers Hub post. Debian 7.0 (\"wheezy\") or higher Ubuntu LTS 14.04 (\"Trusty Tahr\") or higher or Ubuntu non-LTS 19.04 (\"Disco Dingo\") or higher. Any other Linux distribution with: Kernel version 2.6.13 or higher (2.6.26 or higher highly recommended) glibc 2.5 or higher with NPTL support; or musl libc version 1.1 or higher Linux (ARM64) AWS Linux 2 (including AWS Graviton 2) CentOS 8 Important ARM64 is only supported with PHP 8.0 and with New Relic PHP Agent 9.18.1 or later. For more information on Arm64 support, please see the ARM64 installation info. macOS (x86_64 only) Important As of January 2021, we will discontinue support for macOS. For more information, see our PHP agent release notes v9.15.0 and our Explorers Hub post. macOS 10.6 or higher. Because modern versions of macOS can't run 32-bit applications, New Relic removed support for 32-bit macOS with PHP agent release 4.6. FreeBSD (x64) The latest agent supports the latest Production Release. SmartOS and Solaris (x64) (DEPRECATED) Last supported PHP Agent for Solaris and SmartOS is 8.1 SmartOS: All versions OpenSolaris 10: snv_134b or higher Must use i86pc architecture. SPARC architectures are not supported. Web servers Apache 2.2 or 2.4 via mod_php Any web server that supports FastCGI using PHP-FPM Frameworks Supported PHP frameworks include: Frameworks CakePHP 2.x Magento 1.x and 2.x, CE and EE CodeIgniter 2.x MediaWiki Drupal 6.x, 7.x, 8.x, and 9.x Joomla 3.x Slim 2.x, 3.x, and 4.x Kohana 3.2 and 3.3 Symfony 3.x, 4.x, 5.x Laminas 3.x Wordpress Laravel 4.x, 5.x, 6.x, and 7.x Yii 1.x Laravel Lumen 6.x, 7.x, and 8.x Zend Framework 1.x, 2.x, and 3.x Important Joomla 3.x is not supported on PHP 8. Important As of PHP agent version 9.17, the following frameworks or framework versions are no longer supported and may be removed from future agent builds: Cake PHP 1.x Joomla 1.5, 1.6, and 2.x Kohana Silex 1.x and 2.x Symfony 1.x and 2.x The PHP agent's list of frameworks continues to grow. Even if the framework you are using is not listed here, the PHP agent may be able to provide you with useful information about your app. For more information, see PHP frameworks: Integrating support for New Relic. If you want to suggest support for other popular PHP frameworks, visit us at the Explorers Hub and create a Feature Idea! Databases and libraries Supported databases and libraries: Databases and Libraries Firebird Oracle Guzzle PHPUnit Informix PDO Memcached Postgres MongoDB Predis Microsoft SQL Server Redis MySQL SQLite ODBC Sybase Other APM software If your application uses other application performance monitoring (APM) software besides our agent, we cannot guarantee that our agent will work correctly and we cannot offer technical support. For more information, see Errors when using other monitoring software. Instance details New Relic collects instance details for a variety of databases and database drivers. The ability to view specific instances and the types of database information in APM depends on your New Relic agent version. New Relic's PHP agent version 6.8 or higher supports instance details for the following: Database Extension Minimum agent version MongoDB mongodb 7.1 MySQL mysql 6.8 MySQL mysqli 6.8 MySQL pdo_mysql 6.8 PostgreSQL pgsql 6.9 PostgreSQL pdo_pgsql 6.9 Redis predis 7.1 Redis redis 7.1 To disable collection of host information, use either of these options: Set newrelic.datastore_tracer.instance_reporting.enabled to false in the newrelic.ini. 
Omit the database name with newrelic.datastore_tracer.database_name_reporting.enabled = false. To request instance-level information from datastores currently not listed for your New Relic agent, get support at support.newrelic.com. Message queuing HTTP Laravel Queuing, available as an experimental feature in the PHP Agent 6.6.0.169 release, enabled by default since PHP Agent 8.0.0.204. Security requirements As a standard security measure for data collection, your app server must support SHA-2 (256-bit). SHA-1 is not supported. Connect the agent to other New Relic features The PHP agent integrates with other New Relic features to give you end-to-end visibility: Product Integration Browser monitoring The PHP agent automatically injects the browser agent's JS code when you enable auto-instrumentation. After enabling browser injection, you can view data in the APM Summary page and quickly switch between the APM and browser data for a particular app. For configuration options and manual instrumentation, see Browser monitoring and the PHP agent. Infrastructure monitoring When you install the infrastructure and APM agents on the same host, they automatically detect one another. You can then view a list of hosts in the APM UI, and filter your Infrastructure hosts by APM app in the Infrastructure UI. For more information, see APM data in infrastructure monitoring. New Relic Dashboards The PHP agent sends default events and attributes for NRQL queries. You can also record custom events for advanced analysis. Synthetic monitoring Synthetic transaction traces connect requests from synthetic monitors to the underlying APM transaction.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.833084, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "PHP agent compatibility and requirements", - "sections": "PHP agent compatibility and requirements", - "tags": "PHP agent", - "body": " is required Operating systems Important The Windows operating system is not supported. 
Operating system Supported by New Relic's PHP agent Linux (x86 and x86_64) AWS Linux 2 Red Hat Enterprise Linux (RHEL) 5 or higher CentOS 6 or higher Important As of January 2021, we will discontinue support" - }, - "id": "6174902e196a674f042f1856" - }, { "sections": [ "Get started with infrastructure monitoring", @@ -64531,7 +64470,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 103.38542, + "_score": 99.22398, "_version": null, "_explanation": null, "sort": null, @@ -64576,7 +64515,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 98.928665, + "_score": 98.88489, "_version": null, "_explanation": null, "sort": null, @@ -64616,7 +64555,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 439.0726, + "_score": 414.12265, "_version": null, "_explanation": null, "sort": null, @@ -64673,7 +64612,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 398.72922, + "_score": 376.5428, "_version": null, "_explanation": null, "sort": null, @@ -64711,7 +64650,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 345.9989, + "_score": 325.8106, "_version": null, "_explanation": null, "sort": null, @@ -64752,7 +64691,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 338.27264, + "_score": 316.5302, "_version": null, "_explanation": null, "sort": null, @@ -64813,7 +64752,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 245.2834, + "_score": 243.15027, "_version": null, "_explanation": null, "sort": null, @@ -64857,7 +64796,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.17942, + "_score": 77.41069, "_version": null, "_explanation": null, "sort": null, @@ -64898,7 +64837,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.179146, + "_score": 77.41042, "_version": null, "_explanation": null, "sort": null, @@ -64939,7 +64878,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.1507, + "_score": 77.38236, "_version": null, "_explanation": null, "sort": null, @@ -64980,7 +64919,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -65021,7 +64960,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.15053, + "_score": 77.38218, "_version": null, "_explanation": null, "sort": null, @@ -65059,7 +64998,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 328.07147, + "_score": 324.20026, "_version": null, "_explanation": null, "sort": null, @@ -65097,7 +65036,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 288.77274, + "_score": 284.98157, "_version": null, "_explanation": null, "sort": null, @@ -65133,7 +65072,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 229.05695, + "_score": 226.57106, "_version": null, "_explanation": null, "sort": null, @@ -65170,7 +65109,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 209.39697, + "_score": 198.54785, "_version": null, "_explanation": null, "sort": null, @@ -65188,31 +65127,31 @@ "Automate workflows", "Guides to automate workflows", "Quickly tag resources", - "Set up New Relic using the Kubernetes operator", "Automate common tasks", + "Set up New Relic using the Kubernetes operator", "Automatically tag a simple \"Hello World\" Demo across the entire stack", "Getting started with New Relic and Terraform", "Set up New Relic using Helm charts" ], - "published_at": "2022-02-15T01:37:23Z", + "published_at": "2022-02-16T01:38:14Z", "title": "Automate workflows", - "updated_at": "2022-02-15T01:37:23Z", + "updated_at": "2022-02-16T01:38:14Z", "type": "developer", "external_id": "d4f408f077ed950dc359ad44829e9cfbd2ca4871", "document_type": "page", "popularity": 1, - "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", + "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 
30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.14711, + "_score": 94.86789, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "Automate workflows", "sections": "Automate workflows", - "body": " deployment markers 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" + "body": " the Kubernetes operator 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" }, "id": "6091f7c8e7b9d2f6715068f1" }, @@ -65237,7 +65176,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.15608, + "_score": 94.278885, "_version": null, "_explanation": null, "sort": null, @@ -65283,7 +65222,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.503174, + "_score": 81.99892, "_version": null, "_explanation": null, "sort": null, @@ -65317,7 +65256,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.496025, + "_score": 81.99311, "_version": null, "_explanation": null, "sort": null, @@ -65362,7 +65301,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.49205, + "_score": 81.98988, "_version": null, "_explanation": null, "sort": null, @@ -65404,7 +65343,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -65446,7 +65385,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -65488,7 +65427,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -65530,7 +65469,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -65572,7 +65511,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -65616,7 +65555,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -65658,7 +65597,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": 
null, "_explanation": null, "sort": null, @@ -65700,7 +65639,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -65742,7 +65681,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -65781,7 +65720,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73342, "_version": null, "_explanation": null, "sort": null, @@ -65819,7 +65758,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 342.17096, + "_score": 338.64017, "_version": null, "_explanation": null, "sort": null, @@ -65855,7 +65794,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 230.7015, + "_score": 230.11563, "_version": null, "_explanation": null, "sort": null, @@ -65893,7 +65832,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 226.74734, + "_score": 223.68565, "_version": null, "_explanation": null, "sort": null, @@ -65905,6 +65844,40 @@ }, "id": "61b15cb928ccbc888afb35eb" }, + { + "sections": [ + "Bring your own data", + "Track your data and models in production easily", + "How do I stream inference data and metrics to New Relic?", + "Need an example for reference? Check out the following use case!" + ], + "title": "Bring your own data", + "type": "docs", + "tags": [ + "Byo", + "MLOps integrations" + ], + "external_id": "0d936b817691f423260af64c0f156142848406fb", + "image": "", + "url": "https://docs.newrelic.com/docs/mlops/bring-your-own/mlops-byo/", + "published_at": "2022-02-14T03:13:35Z", + "updated_at": "2022-01-22T03:22:19Z", + "document_type": "page", + "popularity": 1, + "body": "New Relic has entered the MLOps observability space, and is now allowing data scientists and machine learning engineers to monitor and observe the performance and effectiveness of their machine-learning models in a production environment. Track your data and models in production easily Once your production reaches a certain point in terms of the number of models it has to monitor, it becomes difficult to keep track of your data and model KPIs, resulting in even more complex systems. With bring your own data (BYO), after just a few minutes, you can quickly send your model’s inference and metric data to the New Relic platform, directly from a notebook or any other environment. The ml-performance-monitoring python package, based on newrelic-telemetry-sdk-python, allows you to send your model’s features and prediction values, as well as custom metrics, by simply adding a few lines to your code. Use the python package to send the following types of data to New Relic: Inference data: Stream your model’s feature and prediction values. Choose between: “Online” instrumentation: Stream the values while the model is being invoked in production by easily wrapping your model through the wrap_model function. The data is streamed automatically on each invocation. \"Offline\" instrumentation: Send the data (features and predictions) as a dataset (as an np.array, or pandas dataframe). Inference data is streamed as a custom event named \"InferenceData\". 
Data metrics: Instead of sending all your raw inference data, select the aggregated statistics option over the features and predictions (for example, min, max, average, or percentile). They will be sent automatically as metrics. Custom metrics: Calculate your own metrics, monitor your model performance or model data, and stream them to New Relic using the record_metrics function. They will be sent as metrics. How do I stream inference data and metrics to New Relic? Follow these steps to stream your data and view it on the New Relic platform: Get your license key: Use the license key for the New Relic account you want your data to be associated with. Stream your data to New Relic by using the new-relic-ml-performance-monitoring package. Follow the example below to see how to send data from your code. View your data in the New Relic Platform: Query your data: Use the data explorer to view the metrics and events data you sent, or use the following queries on the query builder: SELECT * FROM InferenceData WHERE model_name=[model_name] SINCE 1 day ago SELECT * FROM Metric WHERE model_name=[model_name] SINCE 1 day ago Create your own dashboard: Build your own dashboard to visualize the metrics you sent, or view the distributions of your features and predictions. See an example dashboard below. Explore entities: When you stream data to New Relic, an entity of the type machine learning model is automatically created, one per model name. You can explore your model entities by selecting Explorer on New Relic One, and going to the Machine Learning section on the left navigation menu. Need an example for reference? Check out the following use case! Run the following example to stream data and view an example dashboard on your New Relic account: Run the example notebook: You'll have to define your ingest key as an environment variable, or send it as a parameter. (Here's an [example notebook](https://github.com/newrelic-experimental/ml-performance-monitoring/tree/main/examples).) View the data in the example dashboard: Import the example dashboard template JSON, and update YOUR_ACCOUNT_ID with your account ID.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 188.04887, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "MLOps integrations", + "body": "New Relic has entered the MLOps observability space, and is now allowing data scientists and machine learning engineers to monitor and observe the performance and effectiveness of their machine-learning models in a production environment. Track your data and models in production easily Once your" + }, + "id": "61b332a6e7b9d249035a2b4c" + }, { "sections": [ "Comet MLOps integration", @@ -65928,7 +65901,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 190.28519, + "_score": 187.76013, "_version": null, "_explanation": null, "sort": null, @@ -65939,90 +65912,9 @@ "body": "Comet is an MLOps platform that extends full stack observability to machine learning models, and allows you to establish production performance baselines based on model performance. The Comet integration allows you to integrate machine learning metrics with New Relic, so you can continuously monitor" }, "id": "61b33228196a676f3da5c5b9" - }, - { - "sections": [ - "Bring your own data", - "Track your data and models in production easily", - "How do I stream inference data and metrics to New Relic?", - "Need an example for reference?
Check out the following use case!" - ], - "title": "Bring your own data", - "type": "docs", - "tags": [ - "Byo", - "MLOps integrations" - ], - "external_id": "0d936b817691f423260af64c0f156142848406fb", - "image": "", - "url": "https://docs.newrelic.com/docs/mlops/bring-your-own/mlops-byo/", - "published_at": "2022-02-14T03:13:35Z", - "updated_at": "2022-01-22T03:22:19Z", - "document_type": "page", - "popularity": 1, - "body": "New Relic has entered the MLOps observability space, and is now allowing data scientists and machine learning engineers to monitor and observe the performance and effectiveness of their machine-learning models in a production environment. Track your data and models in production easily Once your production reaches a certain point in terms of the number of models it has to monitor, it becomes difficult to keep track of your data and model KPIs, resulting in even more complex systems. With bring your own data (BYO), after just a few minutes, you can quickly send your model’s inference and metric data to the New Relic platform, directly from a notebook or any other environment. The ml-performance-monitoring python package, based on newrelic-telemetry-sdk-python, allows you to send your model’s features and prediction values, as well as custom metrics, by simply adding a few lines to your code. Use the python package to send the following types of data to New Relic: Inference data: Stream your model’s feature and prediction values. Choose between: “Online” instrumentation: Stream the values while the model is being invoked in production by easily wrapping your model through the wrap_model function. The data is streamed automatically on each invocation. \"Offline\" instrumentation: Send the data (features and predictions) as a dataset (as an np.array, or pandas dataframe). Inference data is streamed as a custom event named \"InferenceData\". Data metrics: Instead of sending all your raw inference data, select the aggregated statistics option over the features and predictions (for example, min, max, average, or percentile). They will be sent automatically as metrics. Custom metrics: Calculate your own metrics, monitor your model performance or model data, and stream them to New Relic using the record_metrics function. They will be sent as metrics. How do I stream inference data and metrics to New Relic? Follow these steps to stream your data and view it on the New Relic platform: Get your license key: Use the license key for the New Relic account you want your data to be associated with. Stream your data to New Relic by using the new-relic-ml-performance-monitoring package. Follow the example below to see how to send data from your code. View your data in the New Relic Platform: Query your data: Use the data explorer to view the metrics and events data you sent, or use the following queries on the query builder: SELECT * FROM InferenceData WHERE model_name=[model_name] SINCE 1 day ago SELECT * FROM Metric WHERE model_name=[model_name] SINCE 1 day ago Create your own dashboard: Build your own dashboard to visualize the metrics you sent, or view the distributions of your features and predictions. See an example dashboard below. Explore entities: When you stream data to New Relic, an entity of the type machine learning model is automatically created, one per each model name. You can explore your model entities by selecting Explorer on New Relic One, and going to the Machine Learning section on the left navigation menu. Need an example for reference? 
Check out the following use case! Run the following example to stream data and view an example dashboard on your New Relic account: Run the example notebook: You'll have to define your ingest key as an environment variable, or send it as a parameter. (Here's an [example notebook] .](https://github.com/newrelic-experimental/ml-performance-monitoring/tree/main/examples)) View the data in the example dashboard: Import the example dashboard template JSON, and update YOUR_ACCOUNT_ID with your account ID.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 189.92934, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "tags": "MLOps integrations", - "body": "New Relic has entered the MLOps observability space, and is now allowing data scientists and machine learning engineers to monitor and observe the performance and effectiveness of their machine-learning models in a production environment. Track your data and models in production easily Once your" - }, - "id": "61b332a6e7b9d249035a2b4c" } ], "/pixie/4a2bc392-e39d-423d-a771-ef09dff5a4c2": [ - { - "sections": [ - "Amazon DynamoDB monitoring integration", - "Important", - "Features", - "Activate integration", - "Configuration and polling", - "View and use data", - "Metric data", - "Tip", - "DynamoDbTable", - "DynamoDbRegion", - "DynamoDbGlobalSecondaryIndex", - "Inventory data", - "EOL NOTICE", - "/limits", - "/table", - "/table/provisionedThroughput", - "/table/dynamodb/table" - ], - "title": "Amazon DynamoDB monitoring integration", - "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" - ], - "external_id": "de6e40a969a6c90c8d017023020a6bb711298c93", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-dynamodb-monitoring-integration/", - "published_at": "2022-02-14T09:17:54Z", - "updated_at": "2022-02-14T09:17:54Z", - "document_type": "page", - "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting Amazon DynamoDB data to New Relic. This document explains how to activate this integration and describes the data that can be reported. Features Amazon DynamoDB is a fully managed NoSQL cloud database that supports both document and key-value store models. With the New Relic DynamoDB Integration, you can quickly understand how request latency or errors are affecting your environment. You'll receive metrics about how the database is performing, as well as inventory data like configuration settings, service status, and host metadata. Activate integration To enable this integration follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon DynamoDB integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute or 5 minutes, depending on statistic View and use data To view and use your integration data, go to one.newrelic.com > Infrastructure > AWS and select one of the Amazon DynamodDB integration links. 
You can query and explore your data using the DatastoreSample event type, with a provider value of DynamoDbTable for DynamoDB tables, DynamoDbRegion for regions, or DynamoDbGlobalSecondaryIndex for DynamoDB global secondary indexes. Metric data The New Relic Amazon DynamoDB integration collects the following metric data: Tip Data indicated with an asterisk * is only fetched if extended inventory collection is on. DynamoDbTable Metric Description batchGetSuccessfulRequestLatency Elapsed time for successful BatchGetItem operation requests, in milliseconds. batchGetThrottledRequests BatchGetItem operation requests that exceed the provisioned throughput limits on a table. batchWriteSuccessfulRequestLatency Elapsed time for successful BatchWriteItem operation requests, in milliseconds. batchWriteThrottledRequests BatchWriteItem operation requests that exceed the provisioned throughput limits on a table. conditionalCheckFailedRequests The number of failed attempts to perform conditional writes. consumedReadCapacityUnits The number of read capacity units consumed over the specified time period, to track how much of provisioned throughput is used. consumedWriteCapacityUnits The number of write capacity units consumed over the specified time period, to track how much of provisioned throughput is used. deleteSuccessfulRequestLatency Elapsed time for successful Delete operation requests, in milliseconds. deleteThrottledRequests Delete operation requests that exceed the provisioned throughput limits on a table. getSuccessfulRequestLatency Elapsed time for successful Get operation requests, in milliseconds. getThrottledRequests Get operation requests that exceed the provisioned throughput limits on a table. pendingReplicationCount* The number of item updates that are written to one replica table, but that have not yet been written to another replica in the global table. provisionedReadCapacityUnits The number of provisioned read capacity units for a table. provisionedWriteCapacityUnits The number of provisioned write capacity units for a table. putSuccessfulRequestLatency Elapsed time for successful Put operation requests, in milliseconds. putThrottledRequests Put operation requests that exceed the provisioned throughput limits on a table. queryReturnedItemCount The number of items returned by Query operations. querySuccessfulRequestLatency Elapsed time for successful Query operation requests, in milliseconds. queryThrottledRequests Query operation requests that exceed the provisioned throughput limits on a table. readThrottleEvents Requests to DynamoDB that exceed the provisioned read capacity units for a table. replicationLatency* The elapsed time between an updated item appearing in the DynamoDB stream for one replica table, and that item appearing in another replica in the global table, in milliseconds. scanReturnedItemCount The number of items returned by Scan operations. scanSuccessfulRequestLatency Elapsed time for successful Scan operation requests, in milliseconds. scanThrottledRequests Scan operation requests that exceed the provisioned throughput limits on a table. batchGetSystemErrors BatchGetItem operation requests that generate an HTTP 500 status code. batchWriteSystemErrors BatchWriteItem operation requests that generate an HTTP 500 status code. deleteSystemErrors DeleteItem operation requests that generate an HTTP 500 status code. getSystemErrors GetItem operation requests that generate an HTTP 500 status code. 
putSystemErrors PutItem operation requests that generate an HTTP 500 status code. querySystemErrors Query operation requests that generate an HTTP 500 status code. scanSystemErrors Scan operation requests that generate an HTTP 500 status code. updateSystemErrors UpdateItem operation requests that generate an HTTP 500 status code. updateSuccessfulRequestLatency Elapsed time for successful Update operation requests, in milliseconds. updateThrottledRequests Update operation requests that exceed the provisioned throughput limits on a table. userErrors Requests that generate an HTTP 400 status code. writeThrottleEvents Requests to DynamoDB that exceed the provisioned write capacity units for a table. DynamoDbRegion This sample limits the data to a particular AWS region. It is used with metrics originating from replica tables within a DynamoDB global table. Metric Description systemErrors Requests that generate an HTTP 500 status code. userErrors Requests that generate an HTTP 400 status code DynamoDbGlobalSecondaryIndex Metric Description consumedReadCapacityUnits The number of read capacity units consumed over the specified time period, to track how much of provisioned throughput is used. consumedWriteCapacityUnits The number of write capacity units consumed over the specified time period, to track how much of provisioned throughput is used. onlineIndexConsumedWriteCapacity The number of write capacity units consumed when adding a new global secondary index to a table. onlineIndexPercentageProgress The percentage of completion when a new global secondary index is being added to a table. onlineIndexThrottleEvents The number of write throttle events that occur when adding a new global secondary index to a table. provisionedReadCapacityUnits The number of provisioned read capacity units for a global secondary index. provisionedWriteCapacityUnits The number of provisioned write capacity units for a table. readThrottleEvents Requests to DynamoDB that exceed the provisioned read capacity units for a table. writeThrottleEvents Requests to DynamoDB that exceed the provisioned write capacity units for a table. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data provides information about a service's state and configuration, and available host information. /limits Name Description accountMaxReadCapacity The maximum total read capacity units that your account allows you to provision across all of your tables in this region. accountMaxWriteCapacity The maximum total write capacity units that your account allows you to provision across all of your tables in this region. awsRegion The AWS region that the table was provisioned in. tableMaxReadCapacity The maximum read capacity units allowed by your account to provision a new table created in this region, including the read capacity units provisioned for its Global Secondary Indexes (GSIs). tableMaxWriteCapacity The maximum write capacity units allowed by your account to provision a new table created in this region, including the write capacity units provisioned for its Global Secondary Indexes (GSIs). /table Name Description awsRegion The AWS region that the table was provisioned in. tableStatus The state of the table: creating: The table is being created. updating: The table is being updated. deleting: The table is being deleted. 
active: The table is ready for use. /table/provisionedThroughput Name Description numberOfDecreasesToday The number of provisioned throughput decreases for this table during this UTC calendar day. For current maximums on provisioned throughput decreases, see Limits in DynamoDB. readCapacityUnits The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. Eventually, consistent reads require less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits per second provides 100 eventually consistent ReadCapacityUnits per second. writeCapacityUnits The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. /table/dynamodb/table Tip Data indicated with an asterisk * is only fetched if extended inventory collection is on. Name Description tableName The name of the table. creationDate The date and time when the table was created in UNIX epoch time. tableStatus The current state of the table: CREATING - The table is being created. UPDATING - The table is being updated. DELETING - The table is being deleted. ACTIVE - The table is ready for use. keySchema The complete index key schema. attributeDefinition The list of AttributeDefinition objects. Each of these objects describes one attribute in the table and index key schema. Each AttributeDefinition object in this list is composed of: AttributeName - The name of the attribute. AttributeType - The data type for the attribute. provisionedThroughput The provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases. streamSpecification The current DynamoDB Streams configuration for the table. globalSecondaryIndexes The global secondary indexes, if any, on the table. creationDateTime* The date and time when the global table was created. in UNIX epoch time. globalTableName* The global table name. globalTableStatus* The current state of the global table: CREATING - The global table is being created. UPDATING - The global table is being updated. DELETING - The global table is being deleted. ACTIVE - The global table is ready for use. replicationGroup* The regions where the global table has replicas.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 214.38235, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " status code. updateSystemErrors UpdateItem operation requests that generate an HTTP 500 status code. updateSuccessfulRequestLatency Elapsed time for successful Update operation requests, in milliseconds. 
updateThrottledRequests Update operation requests that exceed the provisioned throughput limits" - }, - "id": "617da67364441f667bfbc6ce" - }, { "sections": [ "Auto-telemetry with Pixie data and security", @@ -66051,7 +65943,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 212.26524, + "_score": 210.8713, "_version": null, "_explanation": null, "sort": null, @@ -66092,7 +65984,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 207.18161, + "_score": 206.30255, "_version": null, "_explanation": null, "sort": null, @@ -66104,6 +65996,53 @@ }, "id": "6174ca7364441f0e385fdea5" }, + { + "sections": [ + "Amazon DynamoDB monitoring integration", + "Important", + "Features", + "Activate integration", + "Configuration and polling", + "View and use data", + "Metric data", + "Tip", + "DynamoDbTable", + "DynamoDbRegion", + "DynamoDbGlobalSecondaryIndex", + "Inventory data", + "EOL NOTICE", + "/limits", + "/table", + "/table/provisionedThroughput", + "/table/dynamodb/table" + ], + "title": "Amazon DynamoDB monitoring integration", + "type": "docs", + "tags": [ + "AWS integrations list", + "Amazon integrations", + "Integrations" + ], + "external_id": "de6e40a969a6c90c8d017023020a6bb711298c93", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-dynamodb-monitoring-integration/", + "published_at": "2022-02-14T09:17:54Z", + "updated_at": "2022-02-14T09:17:54Z", + "document_type": "page", + "popularity": 1, + "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for reporting Amazon DynamoDB data to New Relic. This document explains how to activate this integration and describes the data that can be reported. Features Amazon DynamoDB is a fully managed NoSQL cloud database that supports both document and key-value store models. With the New Relic DynamoDB Integration, you can quickly understand how request latency or errors are affecting your environment. You'll receive metrics about how the database is performing, as well as inventory data like configuration settings, service status, and host metadata. Activate integration To enable this integration follow standard procedures to Connect AWS services to New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon DynamoDB integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute or 5 minutes, depending on statistic View and use data To view and use your integration data, go to one.newrelic.com > Infrastructure > AWS and select one of the Amazon DynamoDB integration links. You can query and explore your data using the DatastoreSample event type, with a provider value of DynamoDbTable for DynamoDB tables, DynamoDbRegion for regions, or DynamoDbGlobalSecondaryIndex for DynamoDB global secondary indexes. Metric data The New Relic Amazon DynamoDB integration collects the following metric data: Tip Data indicated with an asterisk * is only fetched if extended inventory collection is on. DynamoDbTable Metric Description batchGetSuccessfulRequestLatency Elapsed time for successful BatchGetItem operation requests, in milliseconds.
batchGetThrottledRequests BatchGetItem operation requests that exceed the provisioned throughput limits on a table. batchWriteSuccessfulRequestLatency Elapsed time for successful BatchWriteItem operation requests, in milliseconds. batchWriteThrottledRequests BatchWriteItem operation requests that exceed the provisioned throughput limits on a table. conditionalCheckFailedRequests The number of failed attempts to perform conditional writes. consumedReadCapacityUnits The number of read capacity units consumed over the specified time period, to track how much of provisioned throughput is used. consumedWriteCapacityUnits The number of write capacity units consumed over the specified time period, to track how much of provisioned throughput is used. deleteSuccessfulRequestLatency Elapsed time for successful Delete operation requests, in milliseconds. deleteThrottledRequests Delete operation requests that exceed the provisioned throughput limits on a table. getSuccessfulRequestLatency Elapsed time for successful Get operation requests, in milliseconds. getThrottledRequests Get operation requests that exceed the provisioned throughput limits on a table. pendingReplicationCount* The number of item updates that are written to one replica table, but that have not yet been written to another replica in the global table. provisionedReadCapacityUnits The number of provisioned read capacity units for a table. provisionedWriteCapacityUnits The number of provisioned write capacity units for a table. putSuccessfulRequestLatency Elapsed time for successful Put operation requests, in milliseconds. putThrottledRequests Put operation requests that exceed the provisioned throughput limits on a table. queryReturnedItemCount The number of items returned by Query operations. querySuccessfulRequestLatency Elapsed time for successful Query operation requests, in milliseconds. queryThrottledRequests Query operation requests that exceed the provisioned throughput limits on a table. readThrottleEvents Requests to DynamoDB that exceed the provisioned read capacity units for a table. replicationLatency* The elapsed time between an updated item appearing in the DynamoDB stream for one replica table, and that item appearing in another replica in the global table, in milliseconds. scanReturnedItemCount The number of items returned by Scan operations. scanSuccessfulRequestLatency Elapsed time for successful Scan operation requests, in milliseconds. scanThrottledRequests Scan operation requests that exceed the provisioned throughput limits on a table. batchGetSystemErrors BatchGetItem operation requests that generate an HTTP 500 status code. batchWriteSystemErrors BatchWriteItem operation requests that generate an HTTP 500 status code. deleteSystemErrors DeleteItem operation requests that generate an HTTP 500 status code. getSystemErrors GetItem operation requests that generate an HTTP 500 status code. putSystemErrors PutItem operation requests that generate an HTTP 500 status code. querySystemErrors Query operation requests that generate an HTTP 500 status code. scanSystemErrors Scan operation requests that generate an HTTP 500 status code. updateSystemErrors UpdateItem operation requests that generate an HTTP 500 status code. updateSuccessfulRequestLatency Elapsed time for successful Update operation requests, in milliseconds. updateThrottledRequests Update operation requests that exceed the provisioned throughput limits on a table. userErrors Requests that generate an HTTP 400 status code. 
writeThrottleEvents Requests to DynamoDB that exceed the provisioned write capacity units for a table. DynamoDbRegion This sample limits the data to a particular AWS region. It is used with metrics originating from replica tables within a DynamoDB global table. Metric Description systemErrors Requests that generate an HTTP 500 status code. userErrors Requests that generate an HTTP 400 status code. DynamoDbGlobalSecondaryIndex Metric Description consumedReadCapacityUnits The number of read capacity units consumed over the specified time period, to track how much of provisioned throughput is used. consumedWriteCapacityUnits The number of write capacity units consumed over the specified time period, to track how much of provisioned throughput is used. onlineIndexConsumedWriteCapacity The number of write capacity units consumed when adding a new global secondary index to a table. onlineIndexPercentageProgress The percentage of completion when a new global secondary index is being added to a table. onlineIndexThrottleEvents The number of write throttle events that occur when adding a new global secondary index to a table. provisionedReadCapacityUnits The number of provisioned read capacity units for a global secondary index. provisionedWriteCapacityUnits The number of provisioned write capacity units for a table. readThrottleEvents Requests to DynamoDB that exceed the provisioned read capacity units for a table. writeThrottleEvents Requests to DynamoDB that exceed the provisioned write capacity units for a table. Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data provides information about a service's state and configuration, and available host information. /limits Name Description accountMaxReadCapacity The maximum total read capacity units that your account allows you to provision across all of your tables in this region. accountMaxWriteCapacity The maximum total write capacity units that your account allows you to provision across all of your tables in this region. awsRegion The AWS region that the table was provisioned in. tableMaxReadCapacity The maximum read capacity units allowed by your account to provision a new table created in this region, including the read capacity units provisioned for its Global Secondary Indexes (GSIs). tableMaxWriteCapacity The maximum write capacity units allowed by your account to provision a new table created in this region, including the write capacity units provisioned for its Global Secondary Indexes (GSIs). /table Name Description awsRegion The AWS region that the table was provisioned in. tableStatus The state of the table: creating: The table is being created. updating: The table is being updated. deleting: The table is being deleted. active: The table is ready for use. /table/provisionedThroughput Name Description numberOfDecreasesToday The number of provisioned throughput decreases for this table during this UTC calendar day. For current maximums on provisioned throughput decreases, see Limits in DynamoDB. readCapacityUnits The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. Eventually consistent reads require less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits per second provides 100 eventually consistent ReadCapacityUnits per second.
writeCapacityUnits The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. /table/dynamodb/table Tip Data indicated with an asterisk * is only fetched if extended inventory collection is on. Name Description tableName The name of the table. creationDate The date and time when the table was created in UNIX epoch time. tableStatus The current state of the table: CREATING - The table is being created. UPDATING - The table is being updated. DELETING - The table is being deleted. ACTIVE - The table is ready for use. keySchema The complete index key schema. attributeDefinition The list of AttributeDefinition objects. Each of these objects describes one attribute in the table and index key schema. Each AttributeDefinition object in this list is composed of: AttributeName - The name of the attribute. AttributeType - The data type for the attribute. provisionedThroughput The provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases. streamSpecification The current DynamoDB Streams configuration for the table. globalSecondaryIndexes The global secondary indexes, if any, on the table. creationDateTime* The date and time when the global table was created, in UNIX epoch time. globalTableName* The global table name. globalTableStatus* The current state of the global table: CREATING - The global table is being created. UPDATING - The global table is being updated. DELETING - The global table is being deleted. ACTIVE - The global table is ready for use. replicationGroup* The regions where the global table has replicas.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 201.59506, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " status code. updateSystemErrors UpdateItem operation requests that generate an HTTP 500 status code. updateSuccessfulRequestLatency Elapsed time for successful Update operation requests, in milliseconds.
updateThrottledRequests Update operation requests that exceed the provisioned throughput limits" + }, + "id": "617da67364441f667bfbc6ce" + }, { "sections": [ "Amazon RDS monitoring integration", @@ -66141,7 +66080,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 174.7107, + "_score": 164.25568, "_version": null, "_explanation": null, "sort": null, @@ -66152,45 +66091,46 @@ }, { "sections": [ - "Azure Cosmos DB (Document DB) monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "View and query data", - "Metric data", + "Install Auto-telemetry with Pixie", + "General prerequisites for using Pixie", + "Setup steps depend on your account status", + "Install from the beginning of the guided install process", + "Install from the Configure the HELM command/manifest (yaml) file", "Important", - "Account Data", - "DataBase Data", - "Collection Data", - "Inventory data", - "EOL NOTICE" + "Helm method", + "manifest method", + "If you link the wrong Pixie and New Relic account" ], - "title": "Azure Cosmos DB (Document DB) monitoring integration", + "title": "Install Auto-telemetry with Pixie", "type": "docs", "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" + "eBPF", + "Kubernetes", + "Service monitoring", + "Pixie Auto-telemetry" ], - "external_id": "e4bb0ee9204d3af8c336f3bccd58052df2451116", + "external_id": "bb957763e579e39ef2bbeafeebc46ab3a111bca2", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-cosmos-db-document-db-monitoring-integration/", - "published_at": "2022-02-15T19:28:51Z", - "updated_at": "2022-02-15T19:28:51Z", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/auto-telemetry-pixie/install-auto-telemetry-pixie/", + "published_at": "2022-02-14T04:06:38Z", + "updated_at": "2022-01-17T02:28:08Z", "document_type": "page", "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure's Cosmos DB service that reports your Cosmos DB metrics and other data to New Relic. This document explains how to activate the Cosmos DB integration and describes the data that can be captured. Features New Relic gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. Activate integration To enable this integration follow standard procedures to activate your Azure service in New Relic. The Cosmos DB integration requires you to create an additional role and permission to fetch database and collection data: Go to the Azure Portal and open a shell by selecting the terminal icon. Add the following command: az role definition create --role-definition '{ \"Name\": \"NewRelic Integrations\", \"Actions\": [ \"*/read\", \"Microsoft.DocumentDB/databaseAccounts/listKeys/action\" ], \"NotActions\": [], \"AssignableScopes\": [ \"/subscriptions/YOUR_INSERT_SUBSCRIPTION_ID\" ], \"Description\": \"Read Only for NewRelic Integrations\", \"IsCustom\": \"true\" }' Copy From Services > Subscriptions, select the subscription, go to Access control (IAM), and then select Add. In the Role search box, add the name of the newly created role definition (for example, NewRelic Integrations). In the Select search box, add the name of the New Relic integration application, and select it. 
Ensure that the application is added to the Selected members list, then Save. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Cosmos DB integration: Polling interval: 5 minutes Resolution: 1 minute or 5 minutes, varies by metric. For resolution information on a specific metric, see Microsoft Azure's documentation about support metrics. View and query data To view your integration data, go to one.newrelic.com > Infrastructure > Azure and select the Cosmos DB Integration. You can query and explore your data using the following event types: Entity Event Type Provider Account AzureCosmosDbAccountSample AzureCosmosDbAccount Database AzureCosmosDbDatabaseSample AzureCosmosDbDatabase Collection AzureCosmosDbCollectionSample AzureCosmosDbCollection For more on how to find and use data, see Understand and use integration data. Metric data Important For information on deprecated Cosmos DB events or metrics, see Azure Cosmos DB integration (deprecated). We strongly recommend migrating to the supported events and metrics in this document. To view metrics reported by the Cosmos DB integration, query the Entities below. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. Account Data Metric Description Metadata totalRequests Total number of requests. account kind region offerType statusCode resourceGroup metadataRequests Count of metadata requests. account kind region offerType statusCode resourceGroup mongoRequests Count of Mongo requests made. account kind region commandName offerType errorCode resourceGroup mongoRequestCharge Total number of Mongo request units consumed. account kind region commandName offerType errorCode resourceGroup totalRequestUnits Total number of request units consumed. account kind region offerType statusCode resourceGroup provisionedThroughput Throughput provisioned for the database or collection. account offerType kind resourceGroup availableStorageBytes Total available storage, in bytes. account kind offerType region resourceGroup dataUsageBytes Total data usage reported, in bytes. account kind offerType region resourceGroup indexUsageBytes Total index usage reported, in bytes. account kind offerType region resourceGroup documentQuotaBytes Total storage quota reported, in bytes. account kind offerType region resourceGroup documentCount Total document count reported. account kind offerType region resourceGroup ReplicationLatency P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account kind sourceRegion offerType targetRegion resourceGroup ServiceAvailability Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraRequestCharges Total number of request units consumed for Cassandra requests. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraConnectionClosures Total number of Cassandra connections that were closed. account kind closureReason offerType region resourceGroup DataBase Data Metric Description Metadata totalRequests Total number of requests. account databaseName region statusCode metadataRequests Count of metadata requests. 
account databaseName region statusCode mongoRequests Count of Mongo requests made. account databaseName region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account databaseName region commandName errorCode totalRequestUnits Total number of request units consumed. account databaseName region statusCode provisionedThroughput Throughput provisioned for the database or collection. account databaseName availableStorageBytes Total available storage, in bytes. account databaseName region dataUsageBytes Total data usage reported, in bytes. account databaseName region indexUsageBytes Total index usage reported, in bytes. account databaseName region documentQuotaBytes Total storage quota reported, in bytes. account databaseName region documentCount Total document count reported. account databaseName region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account databaseName errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account databaseName errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account closureReason region DataBase Data Metric Description Metadata totalRequests Total number of requests. account databaseName region statusCode metadataRequests Count of metadata requests. account databaseName region statusCode mongoRequests Count of Mongo requests made. account databaseName region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account databaseName region commandName errorCode totalRequestUnits Total number of request units consumed. account databaseName region statusCode provisionedThroughput Throughput provisioned for the database or collection. account databaseName availableStorageBytes Total available storage, in bytes. account databaseName region dataUsageBytes Total data usage reported, in bytes. account databaseName region indexUsageBytes Total index usage reported, in bytes. account databaseName region documentQuotaBytes Total storage quota reported, in bytes. account databaseName region documentCount Total document count reported. account databaseName region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account databaseName errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account databaseName errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account closureReason region Collection Data Metric Description Metadata totalRequests Total number of requests. account collectionName database region statusCode metadataRequests Count of metadata requests. account collectionName database region statusCode mongoRequests Count of Mongo requests made. 
account collectionName database region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account collectionName database region commandName errorCode totalRequestUnits Total number of request units consumed. account collectionName database region statusCode provisionedThroughput Throughput provisioned for the database or collection. account collectionName database availableStorageBytes Total available storage, in bytes. account collectionName database region dataUsageBytes Total data usage reported, in bytes. account collectionName database region indexUsageBytes Total index usage reported, in bytes. account collectionName database region documentQuotaBytes Total storage quota reported, in bytes. account collectionName database region documentCount Total document count reported. account collectionName database region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account collectionName sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account collectionName database errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account collectionName database errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account collectionName closureReason region Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. For details on how to find and use inventory data, see Understand and use data. The Cosmos DB integration reports the inventory data for the entity type azure/cosmosdb/account/ using the following metadata: documentEndpoint: The document end point. databaseAccountOfferType: The database account offer type. consistencyPolicy: The consistency policy for the Cosmos DB database account. defaultConsistencyLevel: The default consistency level for the Cosmos DB database account. kind: The type of database account set at database account creation. resourceGroupName: The Azure resource group name that the Cosmos DB database account belong to. regionName: The region name in which the Azure DocumentDB database account is deployed. type: The azure resource type, which is Microsoft.DocumentDB/databaseAccounts.", + "body": "To get up and running with Auto-telemetry with Pixie, you start with our guided installation. The guided installation deploys Pixie with New Relic's Kubernetes integration on your cluster. You don't need to do any further configuration or installation to start using Pixie. If you want to install Auto-telemetry with Pixie on multiple clusters, re-run the guided install for each additional cluster. General prerequisites for using Pixie Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! Requirements: You must be a full platform user. Other user-related requirements: Users on our New Relic One user model must be assigned to a group that has a role with Pixie-related capabilities. Users on our original user model cannot be Restricted. 
In addition: Review this Pixie data security overview for actions to take to secure your data. Make sure you have sufficient memory. Pixie requires at least 8 GB of memory on each node in your cluster. More memory may be required for larger clusters. Pixie needs to run in privileged mode. Review the other Pixie technical requirements. Setup steps depend on your account status Use the following table to find out where to start installing Auto-telemetry with Pixie. Where you start the installation depends on whether you already have a New Relic or Pixie account, or both. New Relic Pixie Next steps Start the guided install at the beginning of the process. If you already have both types of accounts, and used the same email address for each of them, click the New Relic icon in the Pixie UI. This brings you to the Configure the HELM command/manifest (yaml) file section of the guided installation. Then, follow the steps. If you're using different email addresses in Pixie and New Relic, create a new account for either Pixie or New Relic to match email addresses across both products. You can also contact New Relic support to manually link your existing New Relic account with your Pixie account. If you follow a link to New Relic from the Pixie UI and do not have a New Relic account, you must first create one. Click the New Relic icon in the Pixie UI, and follow the steps to create a New Relic account. When you do so, your Pixie account is linked to it. Then, continue the guided install process with these steps. Sign up for a free New Relic account. Then, start the guided install at the beginning of the process. Install from the beginning of the guided install process Open our New Relic One guided install. Select the account you want to use for the guided install, and click Continue. Note: if you have a single account, you won't see this option. Select Kubernetes and then continue with step one in the next section. Install from the Configure the HELM command/manifest (yaml) file If you arrived in the guided installation process by following a link from Pixie or from within New Relic, your steps begin here. Select the account and cluster for the install. If needed, select a namespace. Important Currently, Pixie performs best on clusters with up to 100 nodes (exceeding 100 nodes can lead to excessive memory usage and scripts failing to run). Friendly reminder: autoscaling can quickly drive up your node numbers. Click Continue. Select the data you want to gather, observe, and debug, and click Continue. On the Choose install method page, select either Helm or manifest. Helm method Run the provided Helm command on your command line. If you're concerned about the amount of Pixie data you'll ingest, check out strategies for reducing ingest. Helm installs a bundle containing the New Relic infrastructure agent, an integration to gather Prometheus metrics and Kubernetes events, and the Pixie integration. The deployment takes a few minutes to complete. To see the status of the install to the cluster, run kubectl get pods -n newrelic. For general information about installing a Kubernetes integration, see this Helm install info. Manifest method Run the provided command in your console, and insert the path to your downloaded manifest. If you're running your Kubernetes cluster in the cloud, see the additional steps in the Kubernetes docs. Click Continue to open the Listening for data page. When you get the message, See your data, click Kubernetes Cluster Explorer to see your cluster. 
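To check the same deployment status from a script instead of kubectl get pods -n newrelic, here is a minimal sketch. It assumes the official kubernetes Python client (pip install kubernetes) and a kubeconfig pointing at the target cluster; neither assumption comes from the guided install itself.

# Minimal sketch: mirror `kubectl get pods -n newrelic` in Python.
# Assumes `pip install kubernetes` and a working kubeconfig (assumptions,
# not requirements of the guided install).
from kubernetes import client, config

config.load_kube_config()  # reads ~/.kube/config by default
v1 = client.CoreV1Api()
for pod in v1.list_namespaced_pod(namespace="newrelic").items:
    print(pod.metadata.name, pod.status.phase)

Once every pod reports Running, the cluster should start appearing in the Kubernetes cluster explorer shortly afterward.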
Auto-telemetry with Pixie might restart after installation. This is caused by the auto update feature. If you link the wrong Pixie and New Relic account Contact support to unlink a Pixie account from your New Relic account. Be aware that if you unlink a Pixie account that was created automatically through the guided install, you'll lose access to that Pixie account.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 162.15854, + "_score": 155.45703, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " consumed. account kind region commandName offerType errorCode resourceGroup totalRequestUnits Total number of request units consumed. account kind region offerType statusCode resourceGroup provisionedThroughput Throughput provisioned for the database or collection. account offerType kind resourceGroup" + "title": "Install Auto-telemetry with Pixie", + "sections": "Install Auto-telemetry with Pixie", + "tags": "Kubernetes", + "body": "To get up and running with Auto-telemetry with Pixie, you start with our guided installation. The guided installation deploys Pixie with New Relic's Kubernetes integration on your cluster. You don't need to do any further configuration or installation to start using Pixie. If you want to install" }, - "id": "617dc763e7b9d2d3dac0580e" + "id": "6174ca14e7b9d26ba513cd12" } ], "/mlops-algorithmia/d0e829a6-ede4-4933-9065-9f0c56f8aa7e": [ @@ -66219,7 +66159,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 427.12885, + "_score": 422.59216, "_version": null, "_explanation": null, "sort": null, @@ -66257,7 +66197,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 320.1549, + "_score": 315.90213, "_version": null, "_explanation": null, "sort": null, @@ -66292,7 +66232,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 300.8895, + "_score": 296.88373, "_version": null, "_explanation": null, "sort": null, @@ -66328,7 +66268,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.24393, + "_score": 282.36722, "_version": null, "_explanation": null, "sort": null, @@ -66363,7 +66303,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 275.34442, + "_score": 274.79767, "_version": null, "_explanation": null, "sort": null, @@ -66408,7 +66348,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.21301, "_version": null, "_explanation": null, "sort": null, @@ -66450,7 +66390,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -66492,7 +66432,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -66534,7 +66474,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.2128, "_version": null, "_explanation": null, "sort": null, @@ -66573,7 +66513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73055, "_version": null, "_explanation": null, "sort": 
null, @@ -66617,7 +66557,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -66659,7 +66599,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -66701,7 +66641,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -66743,7 +66683,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -66785,7 +66725,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -66826,7 +66766,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 390.81378, + "_score": 369.4848, "_version": null, "_explanation": null, "sort": null, @@ -66883,7 +66823,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 354.34232, + "_score": 335.77515, "_version": null, "_explanation": null, "sort": null, @@ -66924,7 +66864,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 260.3659, + "_score": 244.06186, "_version": null, "_explanation": null, "sort": null, @@ -66935,44 +66875,6 @@ }, "id": "603e967664441f7e6f4e889b" }, - { - "sections": [ - "Introduction to notifications", - "Destinations", - "Message templates" - ], - "title": "Introduction to notifications", - "type": "docs", - "tags": [ - "Error Inbox", - "Workflows", - "Notifications", - "Applied intelligence", - "Alerts and Applied Intelligence" - ], - "external_id": "f623ca3e096307c13e6e9214cc2fa6908707d101", - "image": "", - "url": "https://docs.newrelic.com/docs/alerts-applied-intelligence/notifications/intro-notifications/", - "published_at": "2022-02-14T04:51:20Z", - "updated_at": "2022-02-14T04:51:20Z", - "document_type": "page", - "popularity": 1, - "body": "Notifications are a consolidation of the different ways to send notification-events to third-party services, such as Slack, Jira, ServiceNow, and email. You can also use webhooks to send your data to any compatible third-party service. Integrate with your systems by configuring destinations and message templates. Destinations Destinations are unique identifiers and connection details for third-party systems. Use destinations to integrate, send notifications and share data between New Relic and your third-party systems. Message templates Configure the eventual notification events and map New Relic One data to your third-party services using message-templates.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 219.72678, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Introduction to notifications", - "sections": "Introduction to notifications", - "tags": "Notifications", - "body": "Notifications are a consolidation of the different ways to send notification-events to third-party services, such as Slack, Jira, ServiceNow, and email. 
You can also use webhooks to send your data to any compatible third-party service. Integrate with your systems by configuring destinations" - }, - "id": "6190270f64441f165fe9d12b" - }, { "sections": [ "Get started with Incident Intelligence", @@ -67023,7 +66925,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 217.71198, + "_score": 216.50174, "_version": null, "_explanation": null, "sort": null, @@ -67034,6 +66936,44 @@ "body": " copied from the Applied Intelligence Sources. Grafana You can integrate Incident Intelligence with Grafana's notifications for insight into events across your applications and environment. Grafana's webhook notification is a simple way to send information over HTTP to a custom endpoint. To integrate" }, "id": "603ea62e64441f119f4e883f" + }, + { + "sections": [ + "Introduction to notifications", + "Destinations", + "Message templates" + ], + "title": "Introduction to notifications", + "type": "docs", + "tags": [ + "Error Inbox", + "Workflows", + "Notifications", + "Applied intelligence", + "Alerts and Applied Intelligence" + ], + "external_id": "f623ca3e096307c13e6e9214cc2fa6908707d101", + "image": "", + "url": "https://docs.newrelic.com/docs/alerts-applied-intelligence/notifications/intro-notifications/", + "published_at": "2022-02-14T04:51:20Z", + "updated_at": "2022-02-14T04:51:20Z", + "document_type": "page", + "popularity": 1, + "body": "Notifications are a consolidation of the different ways to send notification-events to third-party services, such as Slack, Jira, ServiceNow, and email. You can also use webhooks to send your data to any compatible third-party service. Integrate with your systems by configuring destinations and message templates. Destinations Destinations are unique identifiers and connection details for third-party systems. Use destinations to integrate, send notifications and share data between New Relic and your third-party systems. Message templates Configure the eventual notification events and map New Relic One data to your third-party services using message-templates.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 207.59659, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Introduction to notifications", + "sections": "Introduction to notifications", + "tags": "Notifications", + "body": "Notifications are a consolidation of the different ways to send notification-events to third-party services, such as Slack, Jira, ServiceNow, and email. You can also use webhooks to send your data to any compatible third-party service. 
Integrate with your systems by configuring destinations" + }, + "id": "6190270f64441f165fe9d12b" + } ], "/perfmon/db769025-d3a7-437b-896d-64f5a7a26cac": [ @@ -67067,7 +67007,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.92073, + "_score": 106.673164, "_version": null, "_explanation": null, "sort": null, @@ -67079,49 +67019,6 @@ }, "id": "6043fb59e7b9d272d9579a16" }, - { - "image": "https://docs.newrelic.com/static/d502377e9bc426dc5855e4fbaad0ae39/c1b63/nr1-ohi-windows-services-metric.png", - "url": "https://docs.newrelic.com/docs/infrastructure/host-integrations/host-integrations-list/windows-services-integration/", - "sections": [ - "Windows services integration", - "BETA FEATURE", - "Why it matters", - "Compatibility and requirements", - "Install and activate", - "Update your integration", - "Configure the integration", - "winservices-config.yml", - "Set up an alert", - "Service is not running", - "Start mode is auto, but current state is not running", - "How it works", - "Metric data", - "Metadata", - "Important", - "Source code" - ], - "published_at": "2022-02-14T10:47:10Z", - "title": "Windows services integration", - "updated_at": "2022-02-14T10:47:10Z", - "type": "docs", - "external_id": "8f6cc2f5ec4c751cef616f9724fc7ae6bf2b4969", - "document_type": "page", - "popularity": 1, - "body": "BETA FEATURE This feature is currently in open beta and only applies to the versions starting on 0.3.0 released in September 2021. Check our Explorers Hub post for more details. New Relic's Windows services integration collects data about the services running on your Microsoft Windows hosts and sends it to our platform. You can check the state and start mode of each service, find out which hosts are running a service, set up alerts for services, and more. Our integration is bundled with the Windows infrastructure agent. If you're monitoring Windows hosts on New Relic, you only need to enable the integration to get Windows services data into our platform. Why it matters Microsoft Windows services are the Windows equivalent of daemons in Unix and Linux: long-running executables that can be started, stopped, paused, and restarted without a graphical interface. Knowing which services are enabled, or their state or health, is essential when monitoring Windows infrastructure. With our Windows services integration you can: Check all your Windows services, their state (running, stopped, etc.), start mode (auto, manual, etc.), and other service metadata. See which hosts are running specific Windows services. Set alerts based on any Windows services that you are monitoring using New Relic. Create dashboards to track your Windows services. one.newrelic.com > Dashboards: You can use the Microsoft Windows services integration metrics to create tables for your services. Compatibility and requirements Our integration is compatible with all Windows versions supported by the New Relic agent. You must have version 1.12.1 or higher of our infrastructure monitoring agent installed on a supported host. The x86 Windows versions are not yet supported. Install and activate To install the Windows services integration: Change the directory to the integrations folder: cd C:\\Program Files\\New Relic\\newrelic-infra\\integrations.d\\ Copy Make a copy of the sample configuration file: copy winservices-config.yml.sample winservices-config.yml Copy Edit the winservices-config.yml file as described in the configuration settings and save it. 
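Before restarting the agent to pick up the edited file, it can help to sanity-check the YAML. A minimal sketch, assuming Python and PyYAML are available on the host (neither ships with the integration), using the keys from the sample configuration shown below:

# Minimal sketch: confirm winservices-config.yml parses and that at least one
# service filter has been uncommented under include_matching_entities.
# Python + PyYAML on the host are assumptions, not integration requirements.
import yaml

path = r"C:\Program Files\New Relic\newrelic-infra\integrations.d\winservices-config.yml"
with open(path) as f:
    cfg = yaml.safe_load(f)

integration = cfg["integrations"][0]  # expects the nri-winservices entry
assert integration["name"] == "nri-winservices"
filters = (integration.get("config", {})
           .get("include_matching_entities", {})
           .get("windowsService.name"))
assert filters, "no services included: uncomment include_matching_entities"
print("winservices-config.yml parses; matching:", filters)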
Update your integration The Windows services integration is bundled with the infrastructure monitoring agent. To update the integration, update the agent to the latest version. Configure the integration To enable the integration, edit and save the winservices-config.yml: Uncomment exporter_bind_address: and exporter_bind_port: Add the name of the services you want to monitor to include_matching_entities: By default, no service is included. To include and filter services, you must edit include_matching_entities:. Also, the configuration option inventory_source is not compatible with the integration. Here's an example of the Windows services integration configuration: winservices-config.yml integrations: - name: nri-winservices config: # IP address and port used by the Prometheus exporter to bind the server. # # exporter_bind_address: 127.0.0.1 # exporter_bind_port: 9182 # To include services, create a list of filters to be applied to the service names. # Services that find a match with any of the matching lists are included. By default, # no service is included. # # Currently, only windowsService.name metadata is supported for filtering. # Prepend \"regex\" to indicate that the pattern is a regular expression. # include_matching_entities: windowsService.name: # - regex \".*\" # - \"newrelic-infra\" # Time between consecutive metric collection of the integration. # It must be a number followed by a time unit (s, m or h), without spaces. # scrape_interval: 30s # Timeout used by the agent to restart the integration if no heartbeats are # sent from the integration. Heartbeats are sent every 5s, so this timeout # shouldn't be less than that. # timeout: 60s # Since this is a long-running integration, interval is ignored. To # configure the interval period for collecting and sending data, edit # the scrape_interval parameter. # # interval: Copy For more information, see our documentation about the general structure of on-host integration configurations. Set up an alert You can create Windows services alerts using NRQL conditions to get notified about state changes of the services you wish to monitor. Here are two examples of alert conditions that use Windows services data: Service is not running SELECT count(*) FROM Metric WHERE metricName = 'windows_service_state' AND state != 'running' AND service_name = 'lfsvc' FACET entity.name Copy Start mode is auto, but current state is not running FROM Metric SELECT count(*) WHERE start_mode='auto' AND state!='running' FACET service_name Copy How it works To get data from Windows hosts, our Windows services integration uses a reduced version of the Prometheus exporter for Windows, which exposes Prometheus metrics on the port specified in the agent configuration. The integration collects these metrics, transforms them into entities, filters them, and then sends them to New Relic. The Windows services integration collects Service Functions data using the Windows Prometheus exporter. It then transforms and filters the data before sending it to New Relic. Metric data The Windows services integration provides the following data: Metric Description windows_service_start_mode Start mode of the service. Possible values are: boot system auto manual disabled windows_service_state State of the service. Possible values are: stopped start pending stop pending running continue pending pause pending paused unknown If you migrate from the old beta, the metric names have changed. 
The differences are: Old metric New metric windowsService.service.startMode windows_service_start_mode windowsService.service.state windows_service_state windowsService.service.status has been removed Metadata The Windows services integration sends the following metadata to New Relic: display_name: Name of the service as viewed in the services snap-in. service_name: Unique identifier of the service. process_id: Process identifier of the service. run_as: Account name under which a service runs. Depending on the service type, the format of the account name may be DomainName\\Username or Username@DomainName (UPN). The value is taken from the StartName attribute of the Win32_Service class, which can be NULL (in that case, the label is reported as an empty string). Important If the StartName attribute is NULL, the service is logged on under the LocalSystem account. For a kernel or system-level driver, it runs with a default object name created by the I/O system based on the service name, for example, DWDOM\\Admin. If you migrate from the old beta, the metadata names have changed. The differences are: Old metadata New metadata windowsService.displayName display_name windowsService.entityName entity.name windowsService.hostname hostname windowsService.name service_name windowsService.processId process_id windowsService.runAs run_as Source code The Windows services integration is open source software. That means you can browse its source code and send improvements, or create your own fork and build it. For more information, see the README.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", "_score": 103.45565, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "Windows services integration", "sections": "Windows services integration", "body": " and sends it to our platform. You can check the state and start mode of each service, find out which hosts are running a service, set up alerts for services, and more. Our integration is bundled with the Windows infrastructure agent. 
If you're monitoring Windows hosts on New Relic, you only need to enable" - }, - "id": "6174af57196a678f722f1316" - }, { "sections": [ "Start, stop, and restart the infrastructure agent", @@ -67153,7 +67050,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.22528, + "_score": 99.48357, "_version": null, "_explanation": null, "sort": null, @@ -67200,7 +67097,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 99.415085, + "_score": 97.70507, "_version": null, "_explanation": null, "sort": null, @@ -67213,42 +67110,94 @@ "id": "60440e8fe7b9d252a2579a19" }, { + "image": "https://docs.newrelic.com/static/d502377e9bc426dc5855e4fbaad0ae39/c1b63/nr1-ohi-windows-services-metric.png", + "url": "https://docs.newrelic.com/docs/infrastructure/host-integrations/host-integrations-list/windows-services-integration/", "sections": [ - "AWS FSx monitoring integration", - "Important", - "Activate the integration", - "Configuration and polling", - "Find and use data", + "Windows services integration", + "BETA FEATURE", + "Why it matters", + "Compatibility and requirements", + "Install and activate", + "Update your integration", + "Configure the integration", + "winservices-config.yml", + "Set up an alert", + "Service is not running", + "Start mode is auto, but current state is not running", + "How it works", "Metric data", - "FSx WindowsFileServer data" + "Metadata", + "Important", + "Source code" ], - "title": "AWS FSx monitoring integration", + "published_at": "2022-02-14T10:47:10Z", + "title": "Windows services integration", + "updated_at": "2022-02-14T10:47:10Z", "type": "docs", - "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "external_id": "8f6cc2f5ec4c751cef616f9724fc7ae6bf2b4969", + "document_type": "page", + "popularity": 1, + "body": "BETA FEATURE This feature is currently in open beta and only applies to the versions starting on 0.3.0 released in September 2021. Check our Explorers Hub post for more details. New Relic's Windows services integration collects data about the services running on your Microsoft Windows hosts and sends it to our platform. You can check the state and start mode of each service, find out which hosts are running a service, set up alerts for services, and more. Our integration is bundled with the Windows infrastructure agent. If you're monitoring Windows hosts on New Relic, you only need to enable the integration to get Windows services data into our platform. Why it matters Microsoft Windows services are the Windows equivalent of daemons in Unix and Linux: long-running executables that can be started, stopped, paused, and restarted without a graphical interface. Knowing which services are enabled, or their state or health, is essential when monitoring Windows infrastructure. With our Windows services integration you can: Check all your Windows services, their state (running, stopped, etc.), start mode (auto, manual, etc.), and other service metadata. See which hosts are running specific Windows services. Set alerts based on any Windows services that you are monitoring using New Relic. Create dashboards to track your Windows services. one.newrelic.com > Dashboards: You can use the Microsoft Windows services integration metrics to create tables for your services. Compatibility and requirements Our integration is compatible with all Windows versions supported by the New Relic agent. 
You must have version 1.12.1 or higher of our infrastructure monitoring agent installed on a supported host. The x86 Windows versions are not yet supported. Install and activate To install the Windows services integration: Change the directory to the integrations folder: cd C:\\Program Files\\New Relic\\newrelic-infra\\integrations.d\\ Copy Make a copy of the sample configuration file: copy winservices-config.yml.sample winservices-config.yml Copy Edit the winservices-config.yml file as described in the configuration settings and save it. Update your integration The Windows services integration is bundled with the infrastructure monitoring agent. To update the integration, update the agent to the latest version. Configure the integration To enable the integration, edit and save the winservices-config.yml: Uncomment exporter_bind_address: and exporter_bind_port: Add the name of the services you want to monitor to include_matching_entities: By default, no service is included. To include and filter services, you must edit include_matching_entities:. Also, the configuration option inventory_source is not compatible with the integration. Here's an example of the Windows services integration configuration: winservices-config.yml integrations: - name: nri-winservices config: # IP address and port used by the Prometheus exporter to bind the server. # # exporter_bind_address: 127.0.0.1 # exporter_bind_port: 9182 # To include services, create a list of filters to be applied to the service names. # Services that find a match with any of the matching lists are included. By default, # no service is included. # # Currently, only windowsService.name metadata is supported for filtering. # Prepend \"regex\" to indicate that the pattern is a regular expression. # include_matching_entities: windowsService.name: # - regex \".*\" # - \"newrelic-infra\" # Time between consecutive metric collection of the integration. # It must be a number followed by a time unit (s, m or h), without spaces. # scrape_interval: 30s # Timeout used by the agent to restart the integration if no heartbeats are # sent from the integration. Heartbeats are sent every 5s, so this timeout # shouldn't be less than that. # timeout: 60s # Since this is a long-running integration, interval is ignored. To # configure the interval period for collecting and sending data, edit # the scrape_interval parameter. # # interval: Copy For more information, see our documentation about the general structure of on-host integration configurations. Set up an alert You can create Windows services alerts using NRQL conditions to get notified about state changes of the services you wish to monitor. Here are two examples of alert conditions that use Windows services data: Service is not running SELECT count(*) FROM Metric WHERE metricName = 'windows_service_state' AND state != 'running' AND service_name = 'lfsvc' FACET entity.name Copy Start mode is auto, but current state is not running FROM Metric SELECT count(*) WHERE start_mode='auto' AND state!='running' FACET service_name Copy How it works To get data from Windows hosts, our Windows services integration uses a reduced version of the Prometheus exporter for Windows, which exposes Prometheus metrics on the port specified in the agent configuration. The integration collects these metrics, transforms them into entities, filters them, and then sends them to New Relic. The Windows services integration collects Service Functions data using the Windows Prometheus exporter. 
It then transforms and filters the data before sending it to New Relic. Metric data The Windows services integration provides the following data: Metric Description windows_service_start_mode Start mode of the service. Possible values are: boot system auto manual disabled windows_service_state State of the service. Possible values are: stopped start pending stop pending running continue pending pause pending paused unknown If you migrate from the old beta, the metric names have changed. The differences are: Old metric New metric windowsService.service.startMode windows_service_start_mode windowsService.service.state windows_service_state windowsService.service.status has been removed Metadata The Windows services integration sends the following metadata to New Relic: display_name: Name of the service as viewed in the services snap-in. service_name: Unique identifier of the service. process_id: Process identifier of the service. run_as: Account name under which a service runs. Depending on the service type, the format of the account name may be DomainName\\Username or Username@DomainName (UPN). The value is taken from the StartName attribute of the Win32_Service class, which can be NULL (in that case, the label is reported as an empty string). Important If the StartName attribute is NULL, the service is logged on under the LocalSystem account. For a kernel or system-level driver, it runs with a default object name created by the I/O system based on the service name, for example, DWDOM\\Admin. If you migrate from the old beta, the metadata names have changed. The differences are: Old metadata New metadata windowsService.displayName display_name windowsService.entityName entity.name windowsService.hostname hostname windowsService.name service_name windowsService.processId process_id windowsService.runAs run_as Source code The Windows services integration is open source software. That means you can browse its source code and send improvements, or create your own fork and build it. For more information, see the README.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", "_score": 97.4886, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "Windows services integration", "sections": "Windows services integration", "body": " and sends it to our platform. You can check the state and start mode of each service, find out which hosts are running a service, set up alerts for services, and more. Our integration is bundled with the Windows infrastructure agent. 
If you're monitoring Windows hosts on New Relic, you only need to enable" + }, + "id": "6174af57196a678f722f1316" + }, + { + "sections": [ + "No data appears (Infrastructure)", + "Problem", + "Solution", + "Important", + "Missing infrastructure data", + "Verify install for apt (Debian or Ubuntu)", + "Verify install for yum (Amazon Linux, CentOS, or RHEL)", + "Verify install for Windows Server", + "Verify status with SystemD", + "Verify status with System V", + "Verify status with Upstart", + "Verify status with Windows", + "Missing integration data", + "Other factors affecting access" + ], + "title": "No data appears (Infrastructure)", + "type": "docs", + "tags": [ + "Troubleshoot infrastructure", + "Infrastructure monitoring troubleshooting", + "Infrastructure" ], - "external_id": "4c028bf4156269ee61e00e004c7184cf4c5d238c", + "external_id": "fd618376814a1ec7b486c00e524b0203bbfa0e09", "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-fsx-monitoring-integration/", - "published_at": "2022-02-14T11:37:47Z", - "updated_at": "2022-02-14T11:37:47Z", - "document_type": "page", + "url": "https://docs.newrelic.com/docs/infrastructure/infrastructure-troubleshooting/troubleshoot-infrastructure/no-data-appears-infrastructure/", + "published_at": "2022-02-14T09:53:35Z", + "updated_at": "2022-01-27T12:38:09Z", + "document_type": "troubleshooting_doc", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. We offer a cloud integration for reporting your AWS FSx data to our platform. Here we explain how to activate the integration and what data it collects. Activate the integration To enable this integration follow standard procedures to Connect AWS services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the AWS FSx integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the integration. Data is attached to the following event types: Entity Event Type Provider WindowsFileServer AwsFsxWindowsFileServerSample AwsFsxWindowsFileServer For more on how to use your data, see Understand and use integration data. Metric data This integration collects AWS FSx data for WindowsFileServer. FSx WindowsFileServer data Metric Unit Description dataReadBytes Bytes The number of bytes for file system read operations dataWriteBytes Bytes The number of bytes for file system write operations. dataReadOperations Count The number of read operations dataWriteOperations Count The number of write operations metadataOperations Count The number of metadata operations freeStorageCapacity Bytes The amount of available storage capacity", + "body": "Problem You installed the infrastructure agent and waited a few minutes, but no data appears in the infrastructure UI. Solution Data should appear in the Infrastructure monitoring UI within a few minutes for accounts with previously installed agents. Important For accounts installing the infrastructure agent for the first time, the latency for data appearing in the Infrastructure monitoring UI can be tens of minutes. 
If the following steps verify the installation and no obvious error conditions appear in the verbose logs, monitor the Infrastructure UI for a longer period before contacting support.newrelic.com for assistance. Important By default, the infrastructure agent doesn't send data about the operating system's processes. To enable the sending of process data, set enable_process_metrics to true. To fine-tune which processes you want to monitor, configure include_matching_metrics. Missing infrastructure data If no data appears in the UI, try the following steps to diagnose the problem: Use your package manager to verify that the infrastructure agent is installed: Verify install for apt (Debian or Ubuntu) Use dpkg to verify that the agent is installed: dpkg -l | grep newrelic-infra Copy If dpkg returns no output, see Install with apt. Verify install for yum (Amazon Linux, CentOS, or RHEL) Use rpm to verify that the agent is installed: rpm -qa | grep newrelic-infra Copy If rpm returns no output, see Install with yum. Verify install for Windows Server Use the Windows command prompt or Powershell to verify that the agent directory exists: dir \"C:\\Program Files\\New Relic\\newrelic-infra\" Copy If you receive a File not found error, see Install for Windows Server. Use your init system to verify that the agent is running: Verify status with SystemD Use SystemD commands with CentOS 7, Debian 8, RHEL 7, and Ubuntu 15.04 or higher: Check that the agent is running: sudo systemctl status newrelic-infra Copy If the agent isn't running, start the agent manually: sudo systemctl start newrelic-infra Copy Verify status with System V Use System V commands with Debian 7: Check that the agent is running: sudo /etc/init.d/newrelic-infra status Copy If the agent isn't running, start the agent manually: sudo /etc/init.d/newrelic-infra start Copy Verify status with Upstart Use Upstart commands with Amazon Linux, CentOS 6, RHEL 6, and Ubuntu 14.10 or lower: Check that the agent is running: sudo initctl status newrelic-infra Copy If the agent isn't running, start the agent manually: sudo initctl start newrelic-infra Copy Verify status with Windows Use the Windows command prompt: Check that the agent is running: sc query \"newrelic-infra\" | find \"RUNNING\" Copy If the agent isn't running, start the agent manually with the Windows command prompt: net start newrelic-infra Copy If running net start newrelic-infra returns The service name is invalid, the Infrastructure agent may not have been installed correctly and the service was not properly created. To test this: From Powershell, run the command get-service newrelic-infra, which will return the status of the service. If it returns an error Cannot find any service with service name newrelic-infra, then follow standard procedures to reinstall the agent. Use New Relic Diagnostics to try to automatically identify the issue. Verify that your newrelic-infra.yml configuration file contains a valid license_key setting. Verify that the host has a unique hostname, and verify that the hostname is not localhost. For more information, see this Explorers Hub post. Verify that no firewalls or proxies are blocking outbound connections from the agent process to the Infrastructure domains and ports. 
Confirm the host is reporting correctly even though it is not appearing in the Infrastructure monitoring UI by creating a basic query in Query builder, like: SELECT * FROM SystemSample SINCE 60 minutes ago LIMIT 100 Copy Use the query results to note the timestamps, which show when the data was reported. To determine when data was first received, look at the earliest timestamp. Generate verbose logs and examine the logs for errors. Missing integration data If you are missing data from an integration, see troubleshooting procedures for: APM data missing from infrastructure monitoring Amazon/AWS integrations On-host integrations Other factors affecting access For more on factors that can affect your ability to access New Relic features, see Factors affecting access.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.20166, + "_score": 91.77702, "_version": null, "_explanation": null, "sort": null, "highlight": { - "sections": "FSx WindowsFileServer data", - "body": " information for the AWS FSx integration: New Relic polling interval: 5 minutes Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the integration. Data is attached to the following event types: Entity Event" + "title": "No data appears (Infrastructure)", + "sections": "No data appears (Infrastructure)", + "tags": "Troubleshoot infrastructure", + "body": " running, start the agent manually with the Windows command prompt: net start newrelic-infra Copy If running net start newrelic-infra returns The service name is invalid, the Infrastructure agent may not have been installed correctly and the service was not properly created. To test this: From" }, - "id": "617d6d1228ccbc8e2980094e" + "id": "603e90b9e7b9d26d8c2a07a9" } ], "/django/d8ab0fa9-205e-4c7d-92f1-09ef04b2b8e6": [ @@ -67283,7 +67232,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -67325,7 +67274,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -67367,7 +67316,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67409,7 +67358,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67451,7 +67400,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67495,7 +67444,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -67537,7 +67486,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -67579,7 +67528,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, 
"_explanation": null, "sort": null, @@ -67621,7 +67570,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67663,7 +67612,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67707,7 +67656,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -67749,7 +67698,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -67791,7 +67740,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67833,7 +67782,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67875,7 +67824,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -67904,7 +67853,7 @@ "external_id": "60e26f7107577a43b47c778f30e2bda2ac2b63e0", "image": "", "url": "https://developer.newrelic.com/contribute-to-quickstarts/instrument-library/send-events/", - "published_at": "2022-02-15T01:39:46Z", + "published_at": "2022-02-16T01:44:29Z", "updated_at": "2022-02-10T01:41:23Z", "document_type": "page", "popularity": 1, @@ -67912,7 +67861,7 @@ "body": "Events capture things that occur in your product. For example, if your platform automates application deployments, you might generate an event every time a job runs. If your application scans for security vulnerabilities, you might generate an event every time you detect one. In this lesson, you learn a variety of ways to instrument your application to send events to our Event API: Use a command line utility in bash or PowerShell Use our Python or Java software development kit (SDK) Use an alternative method Use a command line utility You can use a command line utility to send events to New Relic. This is great approach if your library is already instrumented to process commands. For example, you might create a webhook to run the command. That way, with a quick copy and paste of their New Relic license key into your platform, your users can start sending events! 
Step 1 of 2 Set up a command line utility to run from your platform: Bash PowerShell 1 gzip -c example_events.json | curl -X POST -H \"Content-Type: application/json\" -H \"Api-Key: NEW_RELIC_INSERT_KEY\" -H \"Content-Encoding: gzip\" https://insights-collector.newrelic.com/v1/accounts/events --data-binary @- Bash Copy 1 # Replace with your custom event for the body 2 $body = $example_events 3 4 $headers = @{} 5 $headers.Add(\"Api-Key\", \"$NEW_RELIC_INSERT_KEY\") 6 $headers.Add(\"Content-Encoding\", \"gzip\") 7 8 9 $encoding = [System.Text.Encoding]::UTF8 10 $enc_data = $encoding.GetBytes($body) 11 12 $output = [System.IO.MemoryStream]::new() 13 $gzipStream = New-Object System.IO.Compression.GzipStream $output, ([IO.Compression.CompressionMode]::Compress) 14 15 $gzipStream.Write($enc_data, 0, $enc_data.Length) 16 $gzipStream.Close() 17 $gzipBody = $output.ToArray() 18 19 Invoke-WebRequest -Headers $headers -Method Post -Body $gzipBody \"https://insights-collector.newrelic.com/v1/accounts/events\" PowerShell Copy Step 2 of 2 Provide a place for your users to save their New Relic license key. They can learn how to find their license key in our documentation. Important These examples expect an environment variable called $NEW_RELIC_INSERT_KEY. Now your command line utility can send events directly from your platform to your user's New Relic account! Use our SDK We offer an open source telemetry SDK in several of the most popular programming languages. These send data to our data ingest APIs, including our Event API. Of these language SDKs, two work with the Event API: Python and Java. Here, you learn how to install and use each. Python Use our Python SDK to send your event data to New Relic. Step 1 of 2 Use pip to install the newrelic-telemetry-sdk package: bash Copy $ pip install newrelic-telemetry-sdk Instrument your Python library to send an event: import os import time from newrelic_telemetry_sdk import Event, EventClient #Replace with your event data event = Event( \"RateLimit\", {\"path\": \"/v1/endpoint\", \"accountId\": 1000, \"rejectRatio\": 0.1} ) event_client = EventClient(os.environ[\"NEW_RELIC_INSERT_KEY\"]) response = event_client.send(event) response.raise_for_status() print(\"Event sent successfully!\") Copy Here, you use an EventClient to send a single Event to New Relic. This event represents a rate limit on account 1000 that rejected 10 percent of requests. Read our documentation to learn more ways to customize your event data. Step 2 of 2 Instruct your users to store their New Relic license key in an environment variable, called $NEW_RELIC_INSERT_KEY: bash Copy $ export NEW_RELIC_INSERT_KEY= They can learn how to find their license key in our documentation. Java Use our Java SDK to send your event data to New Relic. Important To run this Java example, you need to use gradle. See our repository for a list of other examples and how to use them. 
Step 1 of 3 Configure your dependencies: //Maven Dependencies <dependency> <groupId>com.newrelic.telemetry</groupId> <artifactId>telemetry-core</artifactId> <version>0.13.1</version> </dependency> <dependency> <groupId>com.newrelic.telemetry</groupId> <artifactId>telemetry-http-okhttp</artifactId> <version>0.13.1</version> </dependency> //Gradle Dependencies implementation(\"com.newrelic.telemetry:telemetry-core:0.13.1\") implementation(\"com.newrelic.telemetry:telemetry-http-okhttp:0.13.1\") Copy Step 2 of 3 Instrument your Java library to send an event: //Imports package com.newrelic.telemetry.examples; import com.newrelic.telemetry.Attributes; import com.newrelic.telemetry.EventBatchSenderFactory; import com.newrelic.telemetry.OkHttpPoster; import com.newrelic.telemetry.events.Event; import com.newrelic.telemetry.events.EventBatchSender; import com.newrelic.telemetry.events.EventBuffer; public class EventExample { public static void main(String[] args) throws Exception { String licenseKey = args[0]; EventBatchSenderFactory factory = EventBatchSenderFactory.fromHttpImplementation(OkHttpPoster::new); EventBatchSender sender = EventBatchSender.create(factory.configureWith(licenseKey).useLicenseKey(true).build()); EventBuffer eventBuffer = new EventBuffer(getCommonAttributes()); Attributes attr = new Attributes(); attr.put(\"foo\", 1234); attr.put(\"bar\", \"baz\"); attr.put(\"quux\", true); long timestamp = System.currentTimeMillis(); Event event = new Event(\"SampleEvent\", attr, timestamp); eventBuffer.addEvent(event); sender.sendBatch(eventBuffer.createBatch()); } private static Attributes getCommonAttributes() { return new Attributes().put(\"exampleName\", \"CountExample\"); } } Copy Here, you created an EventBatchSender by configuring an EventBatchSenderFactory object with a license key. Then, you create an event with a type, some attributes, and the current time in milliseconds (UTC time). Finally, you add the event to an EventBuffer and send it with sender.sendBatch(). Step 3 of 3 Instruct your users to store their New Relic license key in an environment variable, called $NEW_RELIC_INSERT_KEY: bash Copy $ gradle -PapiKey= They can learn how to find their license key in our documentation. Alternative Options If a command line utility or language SDK doesn't fit your needs, try out one of our other options: Manual Implementation: If our SDK in your preferred language doesn't support events, you can always manually instrument your own library to make a POST request to the New Relic Event API. Prometheus Data: Prometheus data can be sent to New Relic in two ways, remote write and OpenMetrics. At a very high level, you should use remote write if you manage your own Prometheus servers and OpenMetrics if you don't. Flex Agent: Our serverless Flex agent is a possibility, but might be a more complex integration to get started.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 143.21649, + "_score": 134.92857, "_version": null, "_explanation": null, "sort": null, @@ -67930,7 +67879,7 @@ "Guides to send telemetry data to new relic", "Send events from your product" ], - "published_at": "2022-02-15T01:41:15Z", + "published_at": "2022-02-16T01:44:29Z", "title": "Send telemetry data to New Relic", "updated_at": "2022-02-10T01:41:23Z", "type": "developer", @@ -67941,7 +67890,7 @@ "body": "The first step in the process of building a quickstart is instrumenting your library or configuring your platform to send telemetry data to New Relic. Once you set this up, your product's users will be able to start seeing that data in their account. 
Setting up your product to allow your users to easily send data from your product to New Relic is important because the quickstart you'll offer them will use it in dashboards, alerts, or other resources you provide. Once your user can see the data, they'll be able to use your quickstart to visualize it and gain actionable insights in a matter of minutes. Telemetry data types You need to send at least one type of telemetry data to New Relic from your library or platform in order to build an effective quickstart. New Relic supports four telemetry data types: Events: Events capture things that occur in your product. If your platform automates application deployments, you might generate an event every time a job runs. If your application scans for security vulnerabilities, you might generate an event every time you detect one. Logs: Logs are time-based text records that help your users see what's happening in your system. Metrics: Metrics are aggregated measurements of your system's performance and behaviors. If your product is a database, you might send metrics like CPU utilization, memory utilization, and query throughput. Note that metrics are generally only used if you want to limit the amount of data sent to New Relic. Note that many metrics, such as error rates and throughput, can be computed by aggregating events. Traces: Traces capture details of a single request as it moves through a system. Guides to send telemetry data to new relic Send events from your product With our Events API you can send data from your library or platform.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.36044, + "_score": 90.89867, "_version": null, "_explanation": null, "sort": null, @@ -67950,39 +67899,6 @@ }, "id": "62046d4364441f8fdcc64b7b" }, - { - "sections": [ - "NerdGraph tutorial template", - "Example request #1", - "Example request #2", - "Example request #3" - ], - "title": "NerdGraph tutorial template", - "type": "docs", - "tags": [ - "Article templates", - "Tech writer style guide" - ], - "external_id": "827639827da156b6a36096c0f6a3911052706a91", - "image": "", - "url": "https://docs.newrelic.com/docs/style-guide/writing-docs/article-templates/graphql-api-tutorial-template/", - "published_at": "2022-02-15T18:53:14Z", - "updated_at": "2022-02-15T18:53:13Z", - "document_type": "page", - "popularity": 1, - "body": "This is a template. First, we recommend reading How to write NerdGraph docs. The easiest way to create a NerdGraph tutorial is to clone an existing one that is fairly close to the functionality you're trying to document. This template below serves to explain why we use the general format we do. Ideally the final version of your NerdGraph doc will include: An introduction paragraph describing what problem or use case the users want to solve Several example requests (whatever seems sufficient) to help users understand the functionality Sample intro paragraph: You can use the New Relic GraphQL API to [ add key use cases here]. To construct these queries and see responses, you can use the New Relic GraphQL Explorer. This document explains some of the available functions to query [ data to be queried]. Example request #1 Add a description here for your example query. Tell users why they want to query this kind of data. 
Fill out the first block with your code, and the second with the data returned by the query: { actor { account(id: YOUR_ACCOUNT_ID) { nrql(query: \"SELECT count(*) FROM Transaction SINCE 1 HOUR AGO\") { results } } } } Copy This query returns the following: { \"data\": { \"actor\": { \"account\": { \"nrql\": { \"results\": [ { \"count\": 1000 } ] } } } } } Copy Example request #2 Add a description here for your example query. Tell the customer why they want to query this kind of data. Fill out the first block with your code, and the second with the data returned by the query: { actor { account(id: YOUR_ACCOUNT_ID) { nrql(query: \"SELECT count(*) FROM Transaction SINCE 1 HOUR AGO\") { results } } } } Copy This query returns the following: { \"data\": { \"actor\": { \"account\": { \"nrql\": { \"results\": [ { \"count\": 1000 } ] } } } } } Copy Example request #3 Add a description here for your example query. Tell the customer why they want to query this kind of data. Fill out the first block with your code, and the second with the data returned by the query: { actor { account(id: YOUR_ACCOUNT_ID) { nrql(query: \"SELECT count(*) FROM Transaction SINCE 1 HOUR AGO\") { results } } } } Copy This query returns the following: { \"data\": { \"actor\": { \"account\": { \"nrql\": { \"results\": [ { \"count\": 1000 } ] } } } } } Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 79.859985, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": ". Ideally the final version of your NerdGraph doc will include: An introduction paragraph describing what problem or use case the users want to solve Several example requests (whatever seems sufficient) to help users understand the functionality Sample intro paragraph: You can use the New Relic GraphQL" - }, - "id": "61ab330c64441fd067927126" - }, { "sections": [ "Use integration data in New Relic dashboards", @@ -68020,7 +67936,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 79.649315, + "_score": 74.83083, "_version": null, "_explanation": null, "sort": null, @@ -68031,44 +67947,67 @@ }, { "sections": [ - "Populate your alert configurations with NerdGraph", - "Important", - "Set up your quickstart's alerts directory", - "Look up your alert's condition ID", - "Query alert conditions in NerdGraph", - "Query your static alert condition", - "Tip", - "Query your baseline alert condition", - "Summary" + "NerdGraph tutorial template", + "Example request #1", + "Example request #2", + "Example request #3" ], - "title": "Populate your alert configurations with NerdGraph", - "type": "developer", + "title": "NerdGraph tutorial template", + "type": "docs", "tags": [ - "nerdgraph query components", - "quickstart" + "Article templates", + "Tech writer style guide" ], - "external_id": "8b4b19743ae8b2f1b8b222df1244044b71697220", - "image": "https://developer.newrelic.com/static/dff07d9a1d9bc95aa1aea42233563152/0086b/static-query-response.png", - "url": "https://developer.newrelic.com/contribute-to-quickstarts/query-alerts-for-quickstart/", - "published_at": "2022-02-15T01:41:14Z", - "updated_at": "2021-12-22T01:39:14Z", + "external_id": "827639827da156b6a36096c0f6a3911052706a91", + "image": "", + "url": "https://docs.newrelic.com/docs/style-guide/writing-docs/article-templates/graphql-api-tutorial-template/", + "published_at": "2022-02-15T18:53:14Z", + "updated_at": "2022-02-15T18:53:13Z", "document_type": "page", "popularity": 
1, - "info": "Use NerdGraph to query your existing alert configurations, and add those configurations to your quickstart.", - "body": "With a quickstart, you let your users quickly install dashboards, alerts, and other resources. Here, you learn how to use New Relic's GraphQL API, NerdGraph, to query your existing alert conditions and configure them in your quickstart. Important This guide assumes you already have alerts in your New Relic account and a quickstart that you want to add those alerts to. If you don't already have a quickstart, follow our lab to learn how to build one. Set up your quickstart's alerts directory This guide assumes you have a quickstart. However, your quickstart may or may not already have alerts. Either way, to add alerts to your quickstart, you need an alerts directory. If your quickstart already has one, you can skip this section. The _template directory of New Relic One quickstarts contains an alerts template folder. Copy this folder to your quickstart. From here, you use the YAML file that corresponds to the alert condition type you want to add to your quickstart. Once your quickstart is ready to add alerts, you need to look up your alert condition's ID. Look up your alert's condition ID To populate your alert configurations with NerdGraph, you first need to look up its identifier. Step 1 of 5 From your New Relic homepage, go to Alerts & AI. Step 2 of 5 Click Policies in left hand navigation. Step 3 of 5 Choose your policy from the list. Step 4 of 5 Here, you see the list of conditions. Choose the condition that you want to query. Step 5 of 5 On the Next page, you see ID, Account, and Policy associated with the condition. Copy the ID. With this identifier, you can now query your alert conditions and use the response to build out alert resources in your quickstart. Query alert conditions in NerdGraph There are three types of alerts you can have in New Relic: Static Baseline Jump to the appropriate section for the kind of alert you want to add to your quickstart. Query your static alert condition Step 1 of 4 Open the NerdGraph API explorer and select your key from the dropdown menu. Step 2 of 4 Copy the following GraphQL query and paste it in the center pane of the explorer. { actor { account(id: REPLACE_ACCOUNT_ID) { alerts { nrqlCondition(id: REPLACE_CONDITION_ID) { ... on AlertsNrqlStaticCondition { id name type nrql { query } valueFunction terms { operator priority threshold thresholdDuration thresholdOccurrences } violationTimeLimitSeconds } } } } } } Copy Here, you query AlertsNrqlStaticCondition for your condition's ID, name, query and more. These are the required fields you need to create the same alert in your quickstart. Important Make sure you replace your account ID and condition ID in the above query. Step 3 of 4 Execute the query to get a JSON representation of the specified condition. Next, use this response to add a static alert to your quickstart. Tip Notice the checkboxes in the left-hand pane get checked when you paste the query in the explorer. This query returns the fields required to add alert to the quickstart. If you've set custom fields or want to query more information, feel free to either edit the query in the center pane of the explorer or check the corresponding box in the left-hand pane. Step 4 of 4 Populate static-alert.yml from your alerts quickstart folder with the data returned from your query. 
Given the example response from the last step, our file looks like: --- # Name of the alert name: Static Condition # Description and details details: |+ This alert is triggered whenever the host count is < 2. # Type of alert type: STATIC # NRQL query nrql: query: \"SELECT uniqueCount(host) FROM Transaction\" # Function used to aggregate the NRQL query value(s) for comparison to the terms.threshold (Default: SINGLE_VALUE) valueFunction: SINGLE_VALUE # List of Critical and Warning thresholds for the condition terms: - priority: CRITICAL # Operator used to compare against the threshold. operator: BELOW # Value that triggers a violation threshold: 2 # Time in seconds; 120 - 3600 thresholdDuration: 300 # How many data points must be in violation for the duration thresholdOccurrences: AT_LEAST_ONCE # Duration after which a violation automatically closes # Time in seconds; 300 - 2592000 (Default: 86400 [1 day]) violationTimeLimitSeconds: 259200 Copy Here, you added a static alert to your quickstart. If it's helpful, you can rename this file to whatever you want. Query your baseline alert condition Step 1 of 4 Open the NerdGraph API explorer and select your key from the dropdown menu. Step 2 of 4 Copy the following GraphQL query and paste it in the center pane of the explorer. { actor { account(id: REPLACE_ACCOUNT_ID) { alerts { nrqlCondition(id: REPLACE_CONDITION_ID) { ... on AlertsNrqlBaselineCondition { id name nrql { query } baselineDirection terms { priority threshold thresholdDuration thresholdOccurrences } violationTimeLimitSeconds } } } } } } Copy Here, you query AlertsNrqlBaselineCondition for your condition's name, query, baselineDirection, and other fields required to add the condition to your quickstart. Important Make sure you replace your account ID and condition ID in the above query. Step 3 of 4 Execute the query to get the configuration data of your alert. Next, use this response to add baseline alert to your quickstart. Tip Notice the checkboxes in the left-hand pane get checked when you paste the query in the explorer. This query returns the fields required to add alert to the quickstart. If you've set custom fields or want to query more information, feel free to either edit the query in the center pane of the explorer or check the corresponding box in the left-hand pane. Step 4 of 4 Populate baseline-alert.yml from your alerts quickstart folder with the data returned from your query. Given the example response from the last step, our file looks like: --- # Name of the alert name: Baseline Condition # Description and details details: |+ This alert is triggered whenever the average Transaction duration deviates 2 standard deviations from the normal. # Type of alert type: BASELINE # NRQL query nrql: # Baseline alerts can use an optional FACET query: \"SELECT average(duration) FROM Transaction\" # Direction in which baseline is set (Default: LOWER_ONLY) baselineDirection: UPPER_ONLY # List of Critical and Warning thresholds for the condition terms: - priority: CRITICAL # Value that triggers a violation threshold: 2 # Time in seconds; 120 - 3600, must be a multiple of 60 for Baseline conditions thresholdDuration: 180 # How many data points must be in violation for the duration thresholdOccurrences: ALL # Duration after which a violation automatically closes # Time in seconds; 300 - 2592000 (Default: 86400 [1 day]) violationTimeLimitSeconds: 259200 Copy Here, you added a baseline alert to your quickstart. If it's helpful, you can rename this file to whatever you want. 
Summary In this guide, you learned how to query your existing alert conditions using NerdGraph and how to use the query's JSON response to populate corresponding alert files in quickstart. Read our documentation to learn more about: Alerts Building quickstarts", + "body": "This is a template. First, we recommend reading How to write NerdGraph docs. The easiest way to create a NerdGraph tutorial is to clone an existing one that is fairly close to the functionality you're trying to document. This template below serves to explain why we use the general format we do. Ideally the final version of your NerdGraph doc will include: An introduction paragraph describing what problem or use case the users want to solve Several example requests (whatever seems sufficient) to help users understand the functionality Sample intro paragraph: You can use the New Relic GraphQL API to [ add key use cases here]. To construct these queries and see responses, you can use the New Relic GraphQL Explorer. This document explains some of the available functions to query [ data to be queried]. Example request #1 Add a description here for your example query. Tell users why they want to query this kind of data. Fill out the first block with your code, and the second with the data returned by the query: { actor { account(id: YOUR_ACCOUNT_ID) { nrql(query: \"SELECT count(*) FROM Transaction SINCE 1 HOUR AGO\") { results } } } } Copy This query returns the following: { \"data\": { \"actor\": { \"account\": { \"nrql\": { \"results\": [ { \"count\": 1000 } ] } } } } } Copy Example request #2 Add a description here for your example query. Tell the customer why they want to query this kind of data. Fill out the first block with your code, and the second with the data returned by the query: { actor { account(id: YOUR_ACCOUNT_ID) { nrql(query: \"SELECT count(*) FROM Transaction SINCE 1 HOUR AGO\") { results } } } } Copy This query returns the following: { \"data\": { \"actor\": { \"account\": { \"nrql\": { \"results\": [ { \"count\": 1000 } ] } } } } } Copy Example request #3 Add a description here for your example query. Tell the customer why they want to query this kind of data. Fill out the first block with your code, and the second with the data returned by the query: { actor { account(id: YOUR_ACCOUNT_ID) { nrql(query: \"SELECT count(*) FROM Transaction SINCE 1 HOUR AGO\") { results } } } } Copy This query returns the following: { \"data\": { \"actor\": { \"account\": { \"nrql\": { \"results\": [ { \"count\": 1000 } ] } } } } } Copy", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 73.965385, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": ". 
Ideally the final version of your NerdGraph doc will include: An introduction paragraph describing what problem or use case the users want to solve Several example requests (whatever seems sufficient) to help users understand the functionality Sample intro paragraph: You can use the New Relic GraphQL" + }, + "id": "61ab330c64441fd067927126" + }, + { + "image": "", + "url": "https://docs.newrelic.com/whats-new/2021/10/instant-observability-10-13-21/", + "sections": [ + "Get instant observability with New Relic I/O", + "Solution overview", + "Demo", + "Get started", + "Partner quickstarts" + ], + "published_at": "2022-02-14T18:00:47Z", + "title": "Get instant observability with New Relic I/O", + "updated_at": "2022-01-17T16:29:06Z", + "type": "docs", + "external_id": "c5c0dc59e4c357e64d4d41f42eed85b035e5cc55", + "document_type": "nr1_announcement", + "popularity": 1, + "body": "We're excited to announce New Relic Instant Observability (I/O), the fastest way to instrument, monitor, and analyze your technology stack while avoiding the burden of manual setup. It is a rich open source catalog of 400+ quickstarts (pre-built bundles of observability tools) contributed by experts around the world, reviewed by New Relic, and ready for you to install in a few clicks. Solution overview No matter what technologies and tools you use in your stack, you can get more insights from your telemetry data in minutes. With New Relic I/O, you can: Reduce instrumentation toil with an easy, guided installation Get started faster with pre-built dashboards and alerts Leverage best practices from domain experts and technology partners Quickstarts can contain any combination of instrumentation, integrations, dashboards, and alerts. Note that the ability to access quickstart-linked dashboards requires a user type of core user or full platform user. When you sign up for New Relic, you get one full platform user for free. Demo Watch a quick demo and learn how to install a quickstart below: Get started Install a quickstart from New Relic I/O. If you don't have a New Relic account yet, browse the public New Relic I/O catalog. Want to share your monitoring use case or best practices? New Relic Instant Observability is open source, so it’s easy to add to quickstarts or build a brand new one. Help drive the mission to democratize observability—and be featured as a quickstart author. Contribute a quickstart! Learn more by reading the blog post. Partner quickstarts We are proud to launch New Relic Instant Observability with pre-built quickstarts from five leading enterprise software partners. We have partnered closely with them to create quickstarts that help you extend your New Relic One experience: Kentik is the network observability company. The Kentik quickstarts help network and development teams quickly identify and troubleshoot application performance issues correlated with network traffic performance and health data. Fastly is an edge cloud platform that enables its customers to create great digital experiences quickly, securely, and reliably. With the Fastly CDN quickstart, you can monitor key metrics from Fastly's content delivery network that can help you improve service reliability and ensure great online experiences for end users. Lacework is a data-driven security platform for the cloud that can collect, analyze, and accurately correlate data across an organization's AWS, Azure, GCP and Kubernetes environments, and narrow it down to the handful of security events that matter. 
The Lacework quickstart bridges the gap between observability and security teams, and integrates with New Relic's database to surface security events and alerts directly in New Relic One. Cribl is the observability pipeline company that lets customers parse and route any type of data. The Cribl quickstart allows you to get immediate visibility into your entire environment right from New Relic One without the need to create your own dashboards and alerts---simplifying your workflows and reducing time to value. Trend Micro is a global cyber security leader. The Trend Micro Cloud One quickstart ingests cloud security posture management (CSPM) data from Conformity into New Relic One to contextualize and correlate it with workload telemetry data, delivering AI-powered visualizations and quick insights. This allows security and cloud teams to immediately take action in improving their security and compliance postures. Gigamon is the cloud visibility company. The Gigamon Hawk hybrid-cloud visibility and analytics platform provides access to - and extracts intelligence from all network traffic. The Gigamon quickstart delivers advanced security capabilities that offer network detection and response to advanced threats, including shadow IT activities, crypto-mining and torrent activities, SSL cipher versions and expiration dates across both managed and unmanaged hosts, such as IoT/OT and containers.", + "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 69.67961, + "_score": 68.77337, "_version": null, "_explanation": null, "sort": null, "highlight": { - "sections": "Set up your quickstart's alerts directory", - "info": "Use NerdGraph to query your existing alert configurations, and add those configurations to your quickstart.", - "tags": "quickstart", - "body": "With a quickstart, you let your users quickly install dashboards, alerts, and other resources. Here, you learn how to use New Relic's GraphQL API, NerdGraph, to query your existing alert conditions and configure them in your quickstart. Important This guide assumes you already have alerts in your" + "sections": "Partner quickstarts", + "body": ", guided installation Get started faster with pre-built dashboards and alerts Leverage best practices from domain experts and technology partners Quickstarts can contain any combination of instrumentation, integrations, dashboards, and alerts. 
Note that the ability to access quickstart-linked" }, - "id": "617b50fa28ccbcad47822e01" + "id": "616c0b0f196a671a8c3c9c10" } ], "/gevent/30e39b88-ad5f-4850-b4e6-d9960bceece6": [ @@ -68103,7 +68042,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -68145,7 +68084,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -68187,7 +68126,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -68229,7 +68168,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -68271,7 +68210,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -68315,7 +68254,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.59555, + "_score": 102.699814, "_version": null, "_explanation": null, "sort": null, @@ -68357,7 +68296,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.75403, + "_score": 101.91943, "_version": null, "_explanation": null, "sort": null, @@ -68399,7 +68338,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.7466, + "_score": 101.913666, "_version": null, "_explanation": null, "sort": null, @@ -68441,7 +68380,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.93239, + "_score": 98.52904, "_version": null, "_explanation": null, "sort": null, @@ -68484,7 +68423,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.18535, + "_score": 95.344635, "_version": null, "_explanation": null, "sort": null, @@ -68527,7 +68466,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -68569,7 +68508,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -68611,7 +68550,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -68653,7 +68592,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -68695,7 +68634,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -68737,7 +68676,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 100.52371, + "_score": 100.46506, "_version": null, "_explanation": null, "sort": null, @@ -68780,7 
+68719,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.215935, "_version": null, "_explanation": null, "sort": null, @@ -68822,7 +68761,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -68864,7 +68803,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21587, "_version": null, "_explanation": null, "sort": null, @@ -68906,7 +68845,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.21571, "_version": null, "_explanation": null, "sort": null, @@ -68950,7 +68889,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -68992,7 +68931,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -69034,7 +68973,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -69076,7 +69015,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -69118,7 +69057,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -69162,7 +69101,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.21301, "_version": null, "_explanation": null, "sort": null, @@ -69204,7 +69143,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -69246,7 +69185,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -69288,7 +69227,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.2128, "_version": null, "_explanation": null, "sort": null, @@ -69327,7 +69266,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73055, "_version": null, "_explanation": null, "sort": null, @@ -69340,6 +69279,45 @@ } ], "/azure-kubernetes-service/ce59998f-0daf-434b-9e80-b8ba0f576300": [ + { + "sections": [ + "Azure Kubernetes Service", + "What's included", + "Azure Kubernetes Service installation docs" + ], + "title": "Azure Kubernetes Service", + "type": "quickstarts", + "tags": [ + "kubernetes", + "azure", + "k8s", + "aks" + ], + "quick_start_name": "Azure Kubernetes Service", + "external_id": "5757bbd4d4bfa93fb980bc34d3f097d7b9ec2ec0", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/azure-kubernetes-service/ce59998f-0daf-434b-9e80-b8ba0f576300/", + "published_at": 
"2022-02-07T01:43:50Z", + "updated_at": "2021-12-08T01:42:11Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Azure Kubernetes Service Dashboard Azure Kubernetes Service installation docs Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Doc Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Azure Kubernetes Service quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Azure Kubernetes Service This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. View the repository and open a pull request. View repo Azure Kubernetes Service observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Azure Kubernetes Service installation docs Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 243.75279, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Azure Kubernetes Service", + "sections": "Azure Kubernetes Service", + "tags": "kubernetes", + "quick_start_name": "Azure Kubernetes Service", + "body": "What's included Azure Kubernetes Service Dashboard Azure Kubernetes Service installation docs Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Doc Provides a managed environment for deploying, managing, and scaling your" + }, + "id": "61b00d73e7b9d29936acde94" + }, { "sections": [ "Install the Kubernetes integration using Helm", @@ -69381,7 +69359,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 246.37349, + "_score": 229.00409, "_version": null, "_explanation": null, "sort": null, @@ -69393,45 +69371,6 @@ }, "id": "617d5841196a67bb40f7c1de" }, - { - "sections": [ - "Azure Kubernetes Service", - "What's included", - "Azure Kubernetes Service installation docs" - ], - "title": "Azure Kubernetes Service", - "type": "quickstarts", - "tags": [ - "kubernetes", - "azure", - "k8s", - "aks" - ], - "quick_start_name": "Azure Kubernetes Service", - "external_id": "5757bbd4d4bfa93fb980bc34d3f097d7b9ec2ec0", - "image": "", - "url": "https://developer.newrelic.com/instant-observability/azure-kubernetes-service/ce59998f-0daf-434b-9e80-b8ba0f576300/", - "published_at": "2022-02-07T01:43:50Z", - "updated_at": "2021-12-08T01:42:11Z", - "document_type": "page", - "popularity": 1, - "body": "What's included Azure Kubernetes Service Dashboard Azure Kubernetes Service installation docs Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Doc Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Azure Kubernetes Service quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Azure Kubernetes Service This quickstart doesn't include any alerts . Do you think it should? You can edit this quickstart to add helpful components. 
View the repository and open a pull request. View repo Azure Kubernetes Service observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Azure Kubernetes Service installation docs Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Docs", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 243.97728, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Azure Kubernetes Service", - "sections": "Azure Kubernetes Service", - "tags": "kubernetes", - "quick_start_name": "Azure Kubernetes Service", - "body": "What's included Azure Kubernetes Service Dashboard Azure Kubernetes Service installation docs Provides a managed environment for deploying, managing, and scaling your containerized applications using Azure infrastructure. Doc Provides a managed environment for deploying, managing, and scaling your" - }, - "id": "61b00d73e7b9d29936acde94" - }, { "sections": [ "New Relic Metrics Adapter", @@ -69465,7 +69404,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 227.60455, + "_score": 222.93948, "_version": null, "_explanation": null, "sort": null, @@ -69506,7 +69445,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 223.17163, + "_score": 218.63768, "_version": null, "_explanation": null, "sort": null, @@ -69562,7 +69501,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 222.89464, + "_score": 218.4278, "_version": null, "_explanation": null, "sort": null, @@ -69607,7 +69546,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -69648,7 +69587,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -69690,7 +69629,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -69732,7 +69671,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -69774,7 +69713,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -69817,7 +69756,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -69859,7 +69798,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -69901,7 +69840,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -69943,7 +69882,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + 
"_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -69985,7 +69924,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -70029,7 +69968,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -70071,7 +70010,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -70113,7 +70052,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -70155,7 +70094,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -70197,7 +70136,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -70241,7 +70180,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -70283,7 +70222,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -70325,7 +70264,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -70367,7 +70306,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -70409,7 +70348,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -70454,7 +70393,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 64.175095, + "_score": 60.015556, "_version": null, "_explanation": null, "sort": null, @@ -70506,7 +70445,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.881683, + "_score": 53.63669, "_version": null, "_explanation": null, "sort": null, @@ -70573,7 +70512,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.209778, + "_score": 53.539505, "_version": null, "_explanation": null, "sort": null, @@ -70605,7 +70544,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.90339, + "_score": 51.504333, "_version": null, "_explanation": null, "sort": null, @@ -70638,7 +70577,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.864746, + "_score": 51.473335, "_version": null, "_explanation": null, "sort": null, @@ -70683,7 +70622,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 
83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -70724,7 +70663,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -70766,7 +70705,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -70808,7 +70747,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -70850,7 +70789,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -70893,7 +70832,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -70935,7 +70874,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -70977,7 +70916,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -71019,7 +70958,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -71061,7 +71000,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -71074,89 +71013,6 @@ } ], "/aiohttp/e7948525-8726-46a5-83fa-04732ad42fd1": [ - { - "sections": [ - "OpenTelemetry quick start", - "Step 1. Prerequisites", - "Step 2. Instrument your service with OpenTelemetry", - "Step 3. Export your telemetry data to New Relic", - "Review New Relic settings for exports", - "Important", - "A note about ports", - "A note about endpoints", - "Complete the export configuration steps", - "Export data to an OpenTelemetry Collector (optional)", - "Step 4. View your data in the New Relic UI", - "View our OpenTelemetry examples", - "Unsupported specifications", - "What's next?" - ], - "title": "OpenTelemetry quick start", - "type": "docs", - "tags": [ - "OpenTelemetry", - "Open source telemetry integrations", - "Integrations" - ], - "external_id": "067b2e00bd167f4d78a1398575acd6f3ac76e069", - "image": "", - "url": "https://docs.newrelic.com/docs/more-integrations/open-source-telemetry-integrations/opentelemetry/opentelemetry-quick-start/", - "published_at": "2022-02-14T19:47:12Z", - "updated_at": "2022-02-14T19:47:11Z", - "document_type": "page", - "popularity": 1, - "body": "OpenTelemetry is a flexible toolkit that you can implement in a variety of ways. We recommend a basic four-step approach for setting up OpenTelemetry with New Relic. Here's an overview of the process, followed by details for each step. Prerequisites Instrument your service with OpenTelemetry Export your telemetry data to New Relic View your data in the New Relic UI Step 1. 
Prerequisites First things first: If we don't already know you, sign up for a free New Relic account. Copy your account license key. Step 2. Instrument your service with OpenTelemetry To get started, you instrument your service with OpenTelemetry. OpenTelemetry has language-specific products and SDKs to help you. Many languages offer out-the-box instrumentation for common libraries and frameworks. Each language also provides an API for further instrumenting your service manually. Go to the repository for your language and follow the instructions to instrument your service. When you're done, return here to complete Step 3. Export your telemetry data to New Relic. C++ Erlang Go Java Javascript/Node.js .NET PHP Python Ruby Rust Swift ...See a complete list of languages in GitHub Step 3. Export your telemetry data to New Relic The OpenTelemetry Protocol, or OTLP for short, is a general purpose telemetry data delivery protocol designed for the OpenTelemetry project. This protocol describes how to encode and transmit telemetry data, which makes it a natural choice for data transport. Each language SDK provides an OTLP exporter you can configure to export data over OTLP. In this step, we focus on how to configure an OTLP exporter in your service to export data directly to New Relic. If you prefer to export your data first to an OpenTelemetry collector, we have separate instructions. Here's an example of sending data from your service directly to New Relic. To complete this third step, first familiarize yourself with some required New Relic settings, and then complete the steps in the OTLP exporter documentation for your language. Review New Relic settings for exports Before you go to the external OTLP exporter documentation, consult the table below so you're ready to do the following: Configure the OTLP exporter to add a header (api-key) whose value is the license key for the New Relic account you want to send data to. Based on your integration, configure the endpoint where the exporter sends data to New Relic. Most users will want to use the US OTLP or EU OTLP endpoints. Integration gRPC HTTP Endpoint Supported Ports API header name API header value TLS encryption required US OTLP ✅ ✅ https://otlp.nr-data.net 443, 4317, 4318 api-key License key ✅ EU OTLP ✅ ✅ https://otlp.eu01.nr-data.net 443, 4317, 4318 api-key License key ✅ US FedRamp OTLP (See see FedRAMP compliance for more information) ✅ ✅ https://gov-otlp.nr-data.net 443, 4317, 4318 api-key License key ✅ Infinite Tracing (See best practices for endpoint details ✅ ❌ https://{trace-observer} 443 api-key License key ✅ Important In Node.js, the opentelemetry-collector-exporter-grpc library requires additional options to enable TLS. A note about ports OTLP standards designate gRPC traffic to port 4317, and HTTP traffic to port 4318. The New Relic US FedRamp Native OTLP endpoint adheres to those specifications, as well as allowing gRPC traffic on port 443. However, non-FedRamp New Relic endpoints will accept both gRPC and HTTP traffic on any of the ports listed in the above chart. A note about endpoints Per the OpenTelemetry spec on endpoint URLs for OTLP/HTTP: If you are sending HTTP traffic and using the non-per-signal environment variable (OTEL_EXPORTER_OTLP_ENDPOINT), you can simply set OTEL_EXPORTER_OTLP_ENDPOINT=https://otlp.nr-data.net:{port} and the exporter should append the appropriate path for the signal type (i.e., v1/traces or v1/metrics). 
If you are using a per-signal environment variable (i.e., OTEL_EXPORTER_OTLP_TRACES_ENDPOINT and/or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT), you are required to set it with the appropriate path. For example, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=https://otlp.nr-data.net:4318/v1/traces. Not doing so will result in a 404. Note that this environment variable takes precedence over the non-per-signal one. Complete the export configuration steps Click on the link below for your language and complete the configuration steps. When you're done, return here to complete Step 4. View your data in the New Relic UI. C++ Erlang Go Java Javascript/Node.js .NET PHP Python Ruby Rust Swift ...Find additional OTLP language support in GitHub Export data to an OpenTelemetry Collector (optional) The OpenTelemetry Collector is a configurable and extensible software component to receive, process, and export telemetry data. When you set up a collector, it can operate as a gateway or as an agent: Gateway: The collector receives data from a variety of sources and applies standard processing before exporting to some backend. Agent: The collector is deployed on each host in an environment and can collect telemetry data about the host and processes running on it. When you use a collector, you start by following the same routine as above for setting up OTLP in your service. In this case, instead of exporting data directly to New Relic, you export through a collector that you set up. In the collector, you configure the OTLP exporter to export data to New Relic. When your data goes through a collector, the transport looks like this: Here's a Docker example of how to set up and run an OpenTelemetry collector with the collector YAML: Save the following as otel-config.yaml: receivers: otlp: protocols: grpc: http: processors: batch: exporters: otlp: endpoint: ${OTEL_EXPORTER_OTLP_ENDPOINT} headers: api-key: ${NEW_RELIC_LICENSE_KEY} service: pipelines: traces: receivers: [otlp] processors: [batch] exporters: [otlp] metrics: receivers: [otlp] processors: [batch] exporters: [otlp] logs: receivers: [otlp] processors: [batch] exporters: [otlp] Copy Run the OpenTelemetry collector after you make the following changes: Replace OTLP_ENDPOINT_HERE with the appropriate [endpoint] (#review-settings. Replace YOUR_KEY_HERE with your account's license key. export OTEL_EXPORTER_OTLP_ENDPOINT=OTLP_ENDPOINT_HERE export NEW_RELIC_LICENSE_KEY=YOUR_KEY_HERE docker run --rm \\ -e OTEL_EXPORTER_OTLP_ENDPOINT \\ -e NEW_RELIC_LICENSE_KEY \\ -p 4317:4317 \\ -v \"${PWD}/otel-config.yaml\":/otel-config.yaml \\ --name otelcol \\ otel/opentelemetry-collector \\ --config otel-config.yaml Copy Step 4. View your data in the New Relic UI Once you’ve instrumented your service and configured it to export its data to New Relic, watch the New Relic One user interface for your traces, metrics, and logs! The UI for OpenTelemetry has some similarities to the APM agent UI, so if you are familiar with that, you can go right to the UI. If you need help understanding your OpenTelemetry UI options or how to make sure your data appears in the UI, see View your OpenTelemetry data in New Relic. View our OpenTelemetry examples View some of our examples for using OpenTelemetry with New Relic. Unsupported specifications New Relic currently supports opentelemetry-specification v1.8.0 with a few exceptions: Successful responses from New Relic have no response body, instead of a Protobuf-encoded response based on the data type. 
New Relic also responds with success after authenticating, before decoding and validation. Failure responses from New Relic do not include Status.message or Status.details, since OTLP clients don't use the Status object. JSON-encoded Protobuf messages are not yet supported. What's next? After you do your initial setup, check out our best-practices guide for tips about various configurations to improve your use of OpenTelemetry and New Relic.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.33807, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " and follow the instructions to instrument your service. When you're done, return here to complete Step 3. Export your telemetry data to New Relic. C++ Erlang Go Java Javascript/Node.js .NET PHP Python Ruby Rust Swift ...See a complete list of languages in GitHub Step 3. Export your telemetry data" - }, - "id": "617dca7464441f8186fbc951" - }, - { - "sections": [ - "Azure Functions monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "Inventory data", - "EOL NOTICE", - "Other system data" - ], - "title": "Azure Functions monitoring integration", - "type": "docs", - "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" - ], - "external_id": "1147ad31ec1cd017dc873e039083dc33248581ba", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-functions-monitoring-integration/", - "published_at": "2022-02-15T19:37:53Z", - "updated_at": "2022-02-15T19:37:53Z", - "document_type": "page", - "popularity": 1, - "body": "Our infrastructure monitoring provides an integration for Microsoft Azure Functions that reports data from your Azure Functions service to New Relic. This document explains how to activate this integration and describes the data that can be captured. Features Our Azure Functions integration reports metric data about your Functions service, like the number of function executions, bytes sent and received and HTTP error counts. It also collects data about the status and configuration of the service. You can monitor and alert on your Azure Functions data with our Infrastructure monitoring, and you can create custom queries and chart dashboards. Activate integration To enable this integration follow the standard procedures to activate your Azure service for infrastructure monitoring. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Azure Functions integration: Polling interval: 5 minutes Resolution: 1 data point per minute Find and use data To find your integration data, go to one.newrelic.com, in the top nav click Infrastructure, then click Azure and look for the integration. You can query and explore your data using the AzureFunctionsAppSample event type. Inventory data has a provider value of AzureFunctionsApp. For more on how to find and use integration data, see Understand and use data. Metric data To view metrics reported by the Azure Functions integration, query the AzureFunctionsAppSample event type. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. 
Metric Description Metadata averageMemoryWorkingSetBytes Average count in bytes of memory working set. instance functionExecutionCount Function execution count. instance functionExecutionUnits Function execution in units. instance http5xx Total number of HTTP server errors 500-599. instance memoryWorkingSetBytes Total bytes of memory working set. instance receivedBytes Total bytes received by the functions. instance sentBytes Total bytes sent by the functions. instance Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. For details on how to find and use inventory data, see Understand and use data. The Azure Functions integration reports this inventory data: alwaysOn availabilityState clientAffinityEnabled clientCertEnabled containerSize cors defaultDocuments defaultHostName detailedErrorLoggingEnabled enabled enabledHostNames gatewaySiteName hostNames hostNameBinding hostNamesDisabled hostNameSslStates httpLoggingEnabled isPremiumApp lastModifiedTime loadBalancing logsDirectorySizeLimit managedPipelineMode microService netFrameworkVersion numberOfWorkers operatingSystem outboundIpAddresses phpVersion platformArchitecture remoteDebuggingEnabled remoteDebuggingVersion repositorySiteName scmType state scmSiteAlsoStopped storageAccount targetSwapSlot tracingOptions usageState Other system data The Azure Functions integration also collects these attributes about the service and its configuration: availabilityState defaultHostName enabled isPremiumApp javaVersion netFrameworkVersion nodeVersion operatingSystem phpVersion pythonVersion platformArchitecture state usageState", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.638855, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " metric data about your Functions service, like the number of function executions, bytes sent and received and HTTP error counts. It also collects data about the status and configuration of the service. You can monitor and alert on your Azure Functions data with our Infrastructure monitoring" - }, - "id": "617d72cf64441f775afbd935" - }, { "sections": [ "Configure request queue reporting", @@ -71186,7 +71042,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.543724, + "_score": 81.745026, "_version": null, "_explanation": null, "sort": null, @@ -71228,7 +71084,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56332, "_version": null, "_explanation": null, "sort": null, @@ -71270,7 +71126,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50425, "_version": null, "_explanation": null, "sort": null, @@ -71280,6 +71136,90 @@ "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. 
Alert Transaction Errors This alert is triggered when the transactions fail more than 10" }, "id": "61566c4c64441fc860099644" + }, + { + "sections": [ + "Pika", + "What's included", + "Python", + "Apdex Score", + "High CPU Utilization", + "Transaction Errors", + "Pika installation docs", + "What is Pika?", + "Get started!", + "More info" + ], + "title": "Pika", + "type": "quickstarts", + "tags": [ + "apm", + "python" + ], + "quick_start_name": "Pika", + "external_id": "3f7e4155b883dfe528f42a1d3d74ef2a9b249c78", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/python/pika/dashboards/python.png", + "url": "https://developer.newrelic.com/instant-observability/pika/7bdea1cd-d723-4655-ae9f-c13468d593b7/", + "published_at": "2022-02-05T01:43:08Z", + "updated_at": "2021-10-29T01:50:34Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Alert Pika installation docs Pika is a fully featured, dynamic programming language. Doc What is Pika? Pika is a fully featured, dynamic programming language. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for Pika. Pika quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Python Python Pika observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Pika observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Pika installation docs Pika is a fully featured, dynamic programming language. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 81.50405, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Python", + "tags": "apm", + "body": " automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . 
More info Check out" + }, + "id": "61566846e7b9d2ef508de398" + }, + { + "sections": [ + "Proboscis", + "What's included", + "Python", + "Apdex Score", + "High CPU Utilization", + "Transaction Errors", + "Proboscis installation docs", + "What is Proboscis?", + "Get started!", + "More info" + ], + "title": "Proboscis", + "type": "quickstarts", + "tags": [ + "apm", + "python" + ], + "quick_start_name": "Proboscis", + "external_id": "5212c5d4b199b22a5b2391b962e63bb0e018c290", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/python/proboscis/dashboards/python.png", + "url": "https://developer.newrelic.com/instant-observability/proboscis/073b82b5-0cb7-43c2-9f39-de4fc1411b88/", + "published_at": "2022-02-04T01:50:40Z", + "updated_at": "2021-10-29T01:50:33Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Alert Proboscis installation docs Proboscis is a Python test framework that extends Python's built-in unittest module and Nose with features from TestNG. Doc What is Proboscis? Proboscis is a Python test framework that extends Python's built-in unittest module and Nose with features from TestNG. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Proboscis with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for Proboscis. Proboscis quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Python Python Proboscis observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Proboscis observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Proboscis installation docs Proboscis is a Python test framework that extends Python's built-in unittest module and Nose with features from TestNG. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 81.50404, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Python", + "tags": "apm", + "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. 
Alert Transaction Errors This alert is triggered when the transactions fail more than 10" + }, + "id": "61566c4d64441fceac099619" + } ], "/jinja2/09e0582c-dd93-4bf9-9b5c-d4faa4aa2c28": [ @@ -71314,7 +71254,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -71356,7 +71296,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -71398,7 +71338,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -71440,7 +71380,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -71482,7 +71422,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -71527,7 +71467,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -71568,7 +71508,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -71610,7 +71550,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -71652,7 +71592,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -71694,7 +71634,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -71732,7 +71672,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -71797,7 +71737,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.09776, + "_score": 111.34474, "_version": null, "_explanation": null, "sort": null, @@ -71826,7 +71766,7 @@ "external_id": "1f13326e09d6da78f08f645bc069c22342fbac6c", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/config-management-tools/configure-infrastructure-agent-using-ansible/", - "published_at": "2022-02-14T09:31:37Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-04T11:22:48Z", "document_type": "page", "popularity": 1, @@ -71834,7 +71774,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.761856, + "_score": 107.75279, "_version": null, "_explanation": null, "sort": null, @@ -71873,7 +71813,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 106.56532, + "_score": 99.14227, "_version": null, "_explanation": null, "sort": null, @@ -71885,44
+71825,38 @@ }, { "sections": [ - "Amazon RDS Enhanced Monitoring integration", - "Important", - "Features", - "Enable enhanced monitoring", - "Stream logs to Lambda function", - "Configuration and polling", - "Find and use data", - "Metric data", - "Metric data for all DB engines (except MS SQL Server) [#DB metrics]", - "Metric data for MS SQL", - "Definitions" + "OS versions page", + "Viewing the OS versions page", + "Viewing drill-down details" ], - "title": "Amazon RDS Enhanced Monitoring integration", + "title": "OS versions page", "type": "docs", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "Mobile app pages", + "Mobile monitoring UI", + "Mobile monitoring" ], - "external_id": "b8fc293ef2181c19a5e816449b9a320e44e13ab3", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-rds-enhanced-monitoring-integration/", - "published_at": "2022-02-15T20:48:24Z", - "updated_at": "2022-02-15T20:48:24Z", + "external_id": "370b6f1584d001a17f414066097692b9189e1a50", + "image": "https://docs.newrelic.com/static/8d84abf966c2f4b75ca298b362995c0e/c1b63/os-version-pic_0.png", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/mobile-monitoring-ui/mobile-app-pages/os-versions-page/", + "published_at": "2022-02-14T10:34:40Z", + "updated_at": "2021-07-09T11:46:41Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for collecting enhanced Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor the DB processes and identify potential trouble spots as well as to profile the DB allowing you to improve and optimize their response and cost. Enable enhanced monitoring Important Enabling this integration will incur some additional charges to your Amazon CloudWatch account. In addition there are some limitations and CPU metric data collection differences that are explained in Amazon's enhanced monitoring documentation. You must first have the New Relic Amazon RDS monitoring integration enabled before enabling RDS Enhanced Monitoring. Be sure that you have completed the steps in Connect AWS services to New Relic. New Relic uses AWS Lambda in order to collect RDS logs and provide near-real time data of your RDS instances, this capability is called RDS Enhanced Monitoring in AWS. Follow these steps in order to enable RDS Enhanced Monitoring integration: Specify the RDS instances that need Enable RDS Enhanced Monitoring. You can specify this when creating or modifying the instance: under Monitoring, set Enable Enhanced Monitoring to Yes. We recommend setting the data Granularity to 15 seconds. After enhanced monitoring is enabled, a stream called RDSOSMetrics is created in AWS CloudWatch Logs. Enhanced monitoring metrics are available via this stream. Create a lambda function and subscribe it to that stream in the following steps to obtain the data. 
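The stream-to-Lambda subscription described here, and spelled out as console steps below, can also be scripted. A hedged sketch using boto3; the region and the account in the destination ARN are placeholders for your own deployed newrelic-log-ingestion function:

    import boto3

    # Subscribe the RDSOSMetrics log group to the newrelic-log-ingestion Lambda,
    # mirroring the console walkthrough below. An empty filterPattern forwards
    # every log line. Region and ARN are placeholder assumptions.
    logs = boto3.client('logs', region_name='us-east-1')
    logs.put_subscription_filter(
        logGroupName='RDSOSMetrics',
        filterName='NewRelicLogIngestion',
        filterPattern='',
        destinationArn='arn:aws:lambda:us-east-1:123456789012:function:newrelic-log-ingestion',
    )

Note that CloudWatch Logs must also be allowed to invoke the function (for example via aws lambda add-permission) before the filter delivers anything.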
Create a new AWS Lambda function from the Serverless Repository: Go to Lambda > Create Function > Browse serverless App repository, check the box for Show apps that create custom IAM roles or resource policies, and search for NewRelic-log-ingestion. Populate the LICENSE_KEY environment variable with your New Relic account license key. Select Deploy to create a new CloudFormation stack, a new function called newrelic-log-ingestion, and the required role. Make sure that the NewRelic-log-ingestion function execution role has attached the arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess policy, giving it the appropriate permissions to read CloudWatch Logs. Go to the newrelic-log-ingestion function. Continue with the procedure to stream logs to the Lambda function. Stream logs to Lambda function To link the RDSOSMetrics log stream to the Lambda function (JSON format): From AWS Console > CloudWatch > Logs, select RDSOSMetrics log group, and apply Actions > Create Lambda subscription filter. For the Lambda function, select newrelic-log-ingestion. From the Log Format dropdown, select JSON as the Log format. At the bottom, click the Start streaming button to save your Lambda subscription filter. Once completed, the Lambda function will send all the log lines from RDSOSMetrics to New Relic's ingest services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon RDS Enhanced Monitoring integration: New Relic polling interval: 30 seconds on average (collected via CloudWatch Logs) Configurable when setting up AWS Lambda Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the RDS > Enhanced monitoring dashboard link. You can query and explore your data using the DatastoreSample event type, with a provider value of RdsDbInstance . For more on how to use your data, see Understand and use integration data. Metric data New Relic collects the following enhanced RDS data: Metric data for all DB engines (except MS SQL Server) [#DB metrics] Group Metrics Description General engine The database engine for the DB instance. instanceId The DB instance identifier. instanceResourceId A region-unique, immutable identifier for the DB instance, also used as the log stream identifier. numVCpus The number of virtual CPUs for the DB instance. timestamp The time at which the metrics were taken. uptime The amount of time that the DB instance has been active. version The version of the OS metrics' stream JSON format. cpuUtilization guest The percentage of CPU in use by guest programs. idle The percentage of CPU that is idle. irq The percentage of CPU in use by software interrupts. nice The percentage of CPU in use by programs running at lowest priority. steal The percentage of CPU in use by other virtual machines. system The percentage of CPU in use by the kernel. total The total percentage of the CPU in use. This value excludes the nice value. user The percentage of CPU in use by user programs. wait The percentage of CPU unused while waiting for I/O access. diskIO (not available for Amazon Aurora) avgQueueLen The number of requests waiting in the I/O device's queue. avgReqSz The average request size, in kilobytes. await The number of milliseconds required to respond to requests, including queue time and service time. device The identifier of the disk device in use. readIOsPS The number of read operations per second. 
readKb The total number of kilobytes read. readKbPS The number of kilobytes read per second. rrqmPS The number of merged read requests queued per second. tps The number of I/O transactions per second. util The percentage of CPU time during which requests were issued. writeIOsPS The number of write operations per second. writeKb The total number of kilobytes written. writeKbPS The number of kilobytes written per second. wrqmPS The number of merged write requests queued per second. fileSys maxFiles The maximum number of files that can be created for the file system. total The total number of disk space available for the file system, in kilobytes. used The amount of disk space used by files in the file system, in kilobytes. usedFilePercent The percentage of available files in use. usedFiles The number of files in the file system. usedPercent The percentage of the file-system disk space in use. loadAverageMinute fifteen The number of processes requesting CPU time over the last 15 minutes. five The number of processes requesting CPU time over the last 5 minutes. one The number of processes requesting CPU time over the last minute. memory active The amount of assigned memory, in kilobytes. buffers The amount of memory used for buffering I/O requests prior to writing to the storage device, in kilobytes. cached The amount of memory used for caching file system–based I/O. dirty The amount of memory pages in RAM that have been modified but not written to their related data block in storage, in kilobytes. free The amount of unassigned memory, in kilobytes. hugePagesFree The number of free huge pages. Huge pages are a feature of the Linux kernel. hugePagesRsvd The number of committed huge pages. hugePagesSize The size for each huge pages unit, in kilobytes. hugePagesSurp The number of available surplus huge pages over the total. hugePagesTotal The total number of huge pages for the system. inactive The amount of least-frequently used memory pages, in kilobytes. mapped The total amount of file-system contents that is memory mapped inside a process address space, in kilobytes. pageTables The amount of memory used by page tables, in kilobytes. slab The amount of reusable kernel data structures, in kilobytes. total The total amount of memory, in kilobytes. writeback The amount ofn kilobytes. network rx The number of bytes received per second. tx The number of bytes uploaded per second. process cpuUsedPc The percentage of CPU used by the process. rss The amount of RAM allocated to the process, in kilobytes. memoryUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. swap cached The amount of swap memory, in kilobytes, used as cache memory. free The total amount of swap memory free, in kilobytes. total The total amount of swap memory available, in kilobytes. tasks blocked The number of tasks that are blocked. running The number of tasks that are running. sleeping The number of tasks that are sleeping. stopped The number of tasks that are stopped. total The total number of tasks. zombie The number of child tasks that are inactive with an active parent task. Metric data for MS SQL Group Metrics Description disks totalKb The total space of the disk, in kilobytes. usedKb The amount of space used on the disk, in kilobytes. usedPc The percentage of space used on the disk. availKb The space available on the disk, in kilobytes. availPc The percentage of space available on the disk. 
rdCountPS The number of read operations per second rdBytesPS The number of bytes read per second. wrCountPS The number of write operations per second. wBytesPS The amount of bytes written per second. memory commitToKb The amount of pagefile-backed virtual address space in use, that is, the current commit charge. This value is composed of main memory (RAM) and disk (pagefiles). commitLimitKb The maximum possible value for the commitTotKb metric. This value is the sum of the current pagefile size plus the physical memory available for pageable contents–excluding RAM that is assigned to non-pageable areas. commitPeakKb The largest value of the commitTotKb metric since the operating system was last started. kernTotKb The sum of the memory in the paged and non-paged kernel pools, in kilobytes. kernPagedKb The amount of memory in the paged kernel pool, in kilobytes. kernNonpagedKb The amount of memory in the non-paged kernel pool, in kilobytes. pageSize The size of a page, in bytes. physTotKb The amount of physical memory, in kilobytes. physAvailKb The amount of available physical memory, in kilobytes. sqlServerTotKb The amount of memory committed to Microsoft SQL Server, in kilobytes. sysCacheKb The amount of system cache memory, in kilobytes. network rdBytesPS The number of bytes received per second. wrBytesPS The number of bytes sent per second. process cpuUsedPc The percentage of CPU used by the process. memUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. workingSetKb The amount of memory in the private working set plus the amount of memory that is in use by the process and can be shared with other processes, in kilobytes. workingSetPrivKb The amount of memory that is in use by a process, but can't be shared with other processes, in kilobytes. workingSetShareableKb The amount of memory that is in use by a process and can be shared with other processes, in kilobytes. virtKb The amount of virtual address space the process is using, in kilobytes. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. system handles The number of handles that the system is using. processes The number of processes running on the system. threads The number of threads running on the system. Definitions Term Description Event type DataStoreSample Provider RdsDbInstance Processes Enhanced Monitoring allows you to monitor the following processes associated with your RDS instances. : RDS Process: Shows a summary of the resources used by the RDS management agent, diagnostics monitoring processes, and other AWS processes that are required to support RDS DB instances. RDS Child Process: Nested under RDS Processes, shows a summary of the RDS processes that support the DB instance, for example aurora for Amazon Aurora DB clusters and mysqld for MySQL DB instances. OS Processes: Shows a summary of the kernel and system processes, which generally have minimal impact on performance.", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill down into details by a major or minor OS version (for example, iOS 8, iOS 7.1.1, Android 4.2.2). 
Viewing the OS versions page one.newrelic.com > Mobile > (select an app) > App > OS versions: Use this page to view, sort, or drill down into detailed information about the top five types of operating system versions using your mobile app. To view performance details about the operating system versions for your mobile app users: Go to one.newrelic.com > Mobile > (select an app) > App > OS versions. To select the mobile app versions or time period, use the Versions menu and time picker below the UI menu bar. Optional: Select the Sort by and Hide < 1% throughput options. To expand or collapse the list of operating systems to include versions, select the operating system's name (for example, iOS 7). Viewing drill-down details To drill down into detailed information, use any of our standard user interface functions and page functions. In addition: To view details for the minor and point releases of a major OS version (including interaction time, HTTP request times, network failures, active devices, and slowest traces or all subversions), select a major OS version from the list. To view details for a specific OS version, select its name from the expanded OS list. To view trace details for a slow transaction (if available), select its link. For more information, see Interactions page. To return to the main OS versions page, select the Close (X) button.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.135895, + "_score": 95.48022, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor" + "title": "OS versions page", + "sections": "OS versions page", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android.
Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill" }, - "id": "617d6d5d64441fb952fbcb5d" + "id": "603eaee9e7b9d260112a0809" } ], "/statsd/293c4da1-11cd-4942-8e40-8b6e85663003": [ @@ -71950,7 +71884,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.995674, + "_score": 126.224014, "_version": null, "_explanation": null, "sort": null, @@ -71996,7 +71930,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.161545, + "_score": 80.60472, "_version": null, "_explanation": null, "sort": null, @@ -72038,7 +71972,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.070465, + "_score": 68.373886, "_version": null, "_explanation": null, "sort": null, @@ -72095,7 +72029,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 60.366703, + "_score": 59.388115, "_version": null, "_explanation": null, "sort": null, @@ -72144,7 +72078,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39953, + "_score": 56.21028, "_version": null, "_explanation": null, "sort": null, @@ -72188,7 +72122,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72229,7 +72163,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72271,7 +72205,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72313,7 +72247,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72355,7 +72289,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72393,7 +72327,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -72458,7 +72392,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.09767, + "_score": 111.34474, "_version": null, "_explanation": null, "sort": null, @@ -72487,7 +72421,7 @@ "external_id": "1f13326e09d6da78f08f645bc069c22342fbac6c", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/config-management-tools/configure-infrastructure-agent-using-ansible/", - "published_at": "2022-02-14T09:31:37Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-04T11:22:48Z", "document_type": "page", "popularity": 1, @@ -72495,7 +72429,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.761826, + "_score": 107.75279, "_version": null, "_explanation": null, "sort": null, @@ -72534,7 +72468,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 106.56522, + "_score": 
99.14227, "_version": null, "_explanation": null, "sort": null, @@ -72546,44 +72480,38 @@ }, { "sections": [ - "Amazon RDS Enhanced Monitoring integration", - "Important", - "Features", - "Enable enhanced monitoring", - "Stream logs to Lambda function", - "Configuration and polling", - "Find and use data", - "Metric data", - "Metric data for all DB engines (except MS SQL Server) [#DB metrics]", - "Metric data for MS SQL", - "Definitions" + "OS versions page", + "Viewing the OS versions page", + "Viewing drill-down details" ], - "title": "Amazon RDS Enhanced Monitoring integration", + "title": "OS versions page", "type": "docs", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "Mobile app pages", + "Mobile monitoring UI", + "Mobile monitoring" ], - "external_id": "b8fc293ef2181c19a5e816449b9a320e44e13ab3", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-rds-enhanced-monitoring-integration/", - "published_at": "2022-02-15T20:48:24Z", - "updated_at": "2022-02-15T20:48:24Z", + "external_id": "370b6f1584d001a17f414066097692b9189e1a50", + "image": "https://docs.newrelic.com/static/8d84abf966c2f4b75ca298b362995c0e/c1b63/os-version-pic_0.png", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/mobile-monitoring-ui/mobile-app-pages/os-versions-page/", + "published_at": "2022-02-14T10:34:40Z", + "updated_at": "2021-07-09T11:46:41Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for collecting enhanced Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor the DB processes and identify potential trouble spots as well as to profile the DB allowing you to improve and optimize their response and cost. Enable enhanced monitoring Important Enabling this integration will incur some additional charges to your Amazon CloudWatch account. In addition there are some limitations and CPU metric data collection differences that are explained in Amazon's enhanced monitoring documentation. You must first have the New Relic Amazon RDS monitoring integration enabled before enabling RDS Enhanced Monitoring. Be sure that you have completed the steps in Connect AWS services to New Relic. New Relic uses AWS Lambda in order to collect RDS logs and provide near-real time data of your RDS instances, this capability is called RDS Enhanced Monitoring in AWS. Follow these steps in order to enable RDS Enhanced Monitoring integration: Specify the RDS instances that need Enable RDS Enhanced Monitoring. You can specify this when creating or modifying the instance: under Monitoring, set Enable Enhanced Monitoring to Yes. We recommend setting the data Granularity to 15 seconds. After enhanced monitoring is enabled, a stream called RDSOSMetrics is created in AWS CloudWatch Logs. Enhanced monitoring metrics are available via this stream. Create a lambda function and subscribe it to that stream in the following steps to obtain the data. 
Create a new AWS Lambda function from the Serverless Repository: Go to Lambda > Create Function > Browse serverless App repository, check the box for Show apps that create custom IAM roles or resource policies, and search for NewRelic-log-ingestion. Populate the LICENSE_KEY environment variable with your New Relic account license key. Select Deploy to create a new CloudFormation stack, a new function called newrelic-log-ingestion, and the required role. Make sure that the NewRelic-log-ingestion function execution role has attached the arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess policy, giving it the appropriate permissions to read CloudWatch Logs. Go to the newrelic-log-ingestion function. Continue with the procedure to stream logs to the Lambda function. Stream logs to Lambda function To link the RDSOSMetrics log stream to the Lambda function (JSON format): From AWS Console > CloudWatch > Logs, select RDSOSMetrics log group, and apply Actions > Create Lambda subscription filter. For the Lambda function, select newrelic-log-ingestion. From the Log Format dropdown, select JSON as the Log format. At the bottom, click the Start streaming button to save your Lambda subscription filter. Once completed, the Lambda function will send all the log lines from RDSOSMetrics to New Relic's ingest services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon RDS Enhanced Monitoring integration: New Relic polling interval: 30 seconds on average (collected via CloudWatch Logs) Configurable when setting up AWS Lambda Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the RDS > Enhanced monitoring dashboard link. You can query and explore your data using the DatastoreSample event type, with a provider value of RdsDbInstance . For more on how to use your data, see Understand and use integration data. Metric data New Relic collects the following enhanced RDS data: Metric data for all DB engines (except MS SQL Server) [#DB metrics] Group Metrics Description General engine The database engine for the DB instance. instanceId The DB instance identifier. instanceResourceId A region-unique, immutable identifier for the DB instance, also used as the log stream identifier. numVCpus The number of virtual CPUs for the DB instance. timestamp The time at which the metrics were taken. uptime The amount of time that the DB instance has been active. version The version of the OS metrics' stream JSON format. cpuUtilization guest The percentage of CPU in use by guest programs. idle The percentage of CPU that is idle. irq The percentage of CPU in use by software interrupts. nice The percentage of CPU in use by programs running at lowest priority. steal The percentage of CPU in use by other virtual machines. system The percentage of CPU in use by the kernel. total The total percentage of the CPU in use. This value excludes the nice value. user The percentage of CPU in use by user programs. wait The percentage of CPU unused while waiting for I/O access. diskIO (not available for Amazon Aurora) avgQueueLen The number of requests waiting in the I/O device's queue. avgReqSz The average request size, in kilobytes. await The number of milliseconds required to respond to requests, including queue time and service time. device The identifier of the disk device in use. readIOsPS The number of read operations per second. 
readKb The total number of kilobytes read. readKbPS The number of kilobytes read per second. rrqmPS The number of merged read requests queued per second. tps The number of I/O transactions per second. util The percentage of CPU time during which requests were issued. writeIOsPS The number of write operations per second. writeKb The total number of kilobytes written. writeKbPS The number of kilobytes written per second. wrqmPS The number of merged write requests queued per second. fileSys maxFiles The maximum number of files that can be created for the file system. total The total number of disk space available for the file system, in kilobytes. used The amount of disk space used by files in the file system, in kilobytes. usedFilePercent The percentage of available files in use. usedFiles The number of files in the file system. usedPercent The percentage of the file-system disk space in use. loadAverageMinute fifteen The number of processes requesting CPU time over the last 15 minutes. five The number of processes requesting CPU time over the last 5 minutes. one The number of processes requesting CPU time over the last minute. memory active The amount of assigned memory, in kilobytes. buffers The amount of memory used for buffering I/O requests prior to writing to the storage device, in kilobytes. cached The amount of memory used for caching file system–based I/O. dirty The amount of memory pages in RAM that have been modified but not written to their related data block in storage, in kilobytes. free The amount of unassigned memory, in kilobytes. hugePagesFree The number of free huge pages. Huge pages are a feature of the Linux kernel. hugePagesRsvd The number of committed huge pages. hugePagesSize The size for each huge pages unit, in kilobytes. hugePagesSurp The number of available surplus huge pages over the total. hugePagesTotal The total number of huge pages for the system. inactive The amount of least-frequently used memory pages, in kilobytes. mapped The total amount of file-system contents that is memory mapped inside a process address space, in kilobytes. pageTables The amount of memory used by page tables, in kilobytes. slab The amount of reusable kernel data structures, in kilobytes. total The total amount of memory, in kilobytes. writeback The amount ofn kilobytes. network rx The number of bytes received per second. tx The number of bytes uploaded per second. process cpuUsedPc The percentage of CPU used by the process. rss The amount of RAM allocated to the process, in kilobytes. memoryUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. swap cached The amount of swap memory, in kilobytes, used as cache memory. free The total amount of swap memory free, in kilobytes. total The total amount of swap memory available, in kilobytes. tasks blocked The number of tasks that are blocked. running The number of tasks that are running. sleeping The number of tasks that are sleeping. stopped The number of tasks that are stopped. total The total number of tasks. zombie The number of child tasks that are inactive with an active parent task. Metric data for MS SQL Group Metrics Description disks totalKb The total space of the disk, in kilobytes. usedKb The amount of space used on the disk, in kilobytes. usedPc The percentage of space used on the disk. availKb The space available on the disk, in kilobytes. availPc The percentage of space available on the disk. 
rdCountPS The number of read operations per second rdBytesPS The number of bytes read per second. wrCountPS The number of write operations per second. wBytesPS The amount of bytes written per second. memory commitToKb The amount of pagefile-backed virtual address space in use, that is, the current commit charge. This value is composed of main memory (RAM) and disk (pagefiles). commitLimitKb The maximum possible value for the commitTotKb metric. This value is the sum of the current pagefile size plus the physical memory available for pageable contents–excluding RAM that is assigned to non-pageable areas. commitPeakKb The largest value of the commitTotKb metric since the operating system was last started. kernTotKb The sum of the memory in the paged and non-paged kernel pools, in kilobytes. kernPagedKb The amount of memory in the paged kernel pool, in kilobytes. kernNonpagedKb The amount of memory in the non-paged kernel pool, in kilobytes. pageSize The size of a page, in bytes. physTotKb The amount of physical memory, in kilobytes. physAvailKb The amount of available physical memory, in kilobytes. sqlServerTotKb The amount of memory committed to Microsoft SQL Server, in kilobytes. sysCacheKb The amount of system cache memory, in kilobytes. network rdBytesPS The number of bytes received per second. wrBytesPS The number of bytes sent per second. process cpuUsedPc The percentage of CPU used by the process. memUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. workingSetKb The amount of memory in the private working set plus the amount of memory that is in use by the process and can be shared with other processes, in kilobytes. workingSetPrivKb The amount of memory that is in use by a process, but can't be shared with other processes, in kilobytes. workingSetShareableKb The amount of memory that is in use by a process and can be shared with other processes, in kilobytes. virtKb The amount of virtual address space the process is using, in kilobytes. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. system handles The number of handles that the system is using. processes The number of processes running on the system. threads The number of threads running on the system. Definitions Term Description Event type DataStoreSample Provider RdsDbInstance Processes Enhanced Monitoring allows you to monitor the following processes associated with your RDS instances. : RDS Process: Shows a summary of the resources used by the RDS management agent, diagnostics monitoring processes, and other AWS processes that are required to support RDS DB instances. RDS Child Process: Nested under RDS Processes, shows a summary of the RDS processes that support the DB instance, for example aurora for Amazon Aurora DB clusters and mysqld for MySQL DB instances. OS Processes: Shows a summary of the kernel and system processes, which generally have minimal impact on performance.", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill down into details by a major or minor OS version (for example, iOS 8, iOS 7.1.1, Android 4.2.2). 
Viewing the OS versions page one.newrelic.com > Mobile > (select an app) > App > OS versions: Use this page to view, sort, or drill down into detailed information about the top five types of operating system versions using your mobile app. To view performance details about the operating system versions for your mobile app users: Go to one.newrelic.com > Mobile > (select an app) > App > OS versions. To select the mobile app versions or time period, use the Versions menu and time picker below the UI menu bar. Optional: Select the Sort by and Hide < 1% throughput options. To expand or collapse the list of operating systems to include versions, select the operating system's name (for example, iOS 7). Viewing drill-down details To drill down into detailed information, use any of our standard user interface functions and page functions. In addition: To view details for the minor and point releases of a major OS version (including interaction time, HTTP request times, network failures, active devices, and slowest traces or all subversions), select a major OS version from the list. To view details for a specific OS version, select its name from the expanded OS list. To view trace details for a slow transaction (if available), select its link. For more information, see Interactions page. To return to the main OS versions page, select the Close (X) button.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.1358, + "_score": 95.48022, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor" + "title": "OS versions page", + "sections": "OS versions page", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android.
Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill" }, - "id": "617d6d5d64441fb952fbcb5d" + "id": "603eaee9e7b9d260112a0809" } ], "/curb/dbe3684e-d688-4d2d-9279-f900c2f76c36": [ @@ -72619,7 +72547,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72660,7 +72588,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72702,7 +72630,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72744,7 +72672,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72786,7 +72714,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -72824,7 +72752,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 268.07825, + "_score": 250.10059, "_version": null, "_explanation": null, "sort": null, @@ -72880,7 +72808,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.87093, + "_score": 165.793, "_version": null, "_explanation": null, "sort": null, @@ -72891,69 +72819,6 @@ }, "id": "60452628196a679380960f32" }, - { - "sections": [ - "Quality foundation implementation guide", - "Overview", - "Desired Outcome", - "Key Performance Indicators", - "Availability", - "Largest contentful paint (LCP)", - "First input delay (FID)", - "Cumulative layout shift (CLS)", - "Time to first byte (TTFB)", - "Ajax response times", - "HTTP error rate", - "JavaScript error rate", - "Prerequisites", - "Required knowledge", - "Required Installation and Configuration", - "Establish current state", - "Review instrumented pages", - "Validate Browser URL grouping", - "Understand how you will segment your data", - "Import the quality foundation dashboard", - "Capture current performance for each dashboard page", - "Improvement Process", - "Plan your work", - "Decide which KPIs to improve", - "Improve targeted KPIs", - "Improve page load performance", - "Improve AJAX response times", - "Improve the AJAX error rate", - "Improve JavaScript errors", - "Conclusion" - ], - "title": "Quality foundation implementation guide", - "type": "docs", - "tags": [ - "Quality Foundation", - "Implementation guide", - "Digital customer experience", - "Customer experience", - "Observability maturity" - ], - "external_id": "91186ed56e33e040c73d1fff940cec0644c199f6", - "image": "https://docs.newrelic.com/static/9238160720501f4423dff703746fb59d/d9199/cx-what-you-can-measure-nr.png", - "url": "https://docs.newrelic.com/docs/new-relic-solutions/observability-maturity/customer-experience/quality-foundation-implementation-guide/", - "published_at": "2022-02-15T12:55:51Z", - "updated_at": "2022-02-15T12:55:51Z", - "document_type": "page", - "popularity": 1, - "body": "Overview Digital customer experience is your end user’s experience across all your digital touch points. 
There are four core factors that impact a user’s experience: Availability (Is it reachable?) Performance (Does it perform well enough to be usable?) Content quality (Does it have what users need and can they find it?) Product and content relevance (Does it have what users care about?) Digital customer experience includes web, mobile, and IoT. The first version of this guide is focused on measuring the end user web experience. Quality Foundation is about creating a standard practice to help you understand your digital customer experience in a meaningful way. This implementation guide will help you: Look at customer experience in relation to: Global functions, such as search and login Lines of business Regions Report back to business stakeholders on what they care about Prioritize what you work on Create a repeatable practice Desired Outcome Improve customer engagement and retention by measuring and improving performance in a way that better aligns to the end user experience. Key Performance Indicators Quality Foundation measures the following KPIs: Availability This KPI measures whether or not your application or its pages can be accessed by your users Goal: Improve uptime and availablity Thresholds: < 99% warning < 95% critical 99% or \"2 9's\" is a good minimum standard of availability, even for employee applications or sub-pages. We configure these default thresholds into the dashboards. You can easily change this to better suit expectations for your application. Largest contentful paint (LCP) Part of Core Web Vitals. Largest Contentful Paint (LCP) measures the time it takes to load the largest image after a user has navigated to a new page. Goal: Reduce LCP to 2.5 seconds or better for the 75% percentile for all pages or at least the most critical pages. Thresholds: Warning: > 2.5 seconds Critical: > 4.0 seconds LCP thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. First input delay (FID) Part of Core Web Vitals. Measures the interactivity of a page by tracking the time between user interaction (such as clicking a link or entering text) when the browser begins processing the event. Goal: Reduce FID to 100 milliseconds or better for the 75% percentile for all pages or at least the most critical pages. Thresholds: Warning: > 100 milliseconds Critical: > 300 milliseconds FID thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Cumulative layout shift (CLS) Part of Core Web Vitals. Measures how much the page layout shifts during render. Goal: Maintain a score of 0.1 or less for the 75% percentile for all pages or at least the most critical pages. Thresholds: Warning: > 0.1 score Critical: > 0.25 score CLS thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Time to first byte (TTFB) This KPI measures the time from navigation start (a user clicking a link) to the browser receiving the first byte of the response from the server. Google considers TTFB secondary to Core Web Vitals. We recommend measuring it for a more complete picture. It can be revealing if you see a change in LCP, because it answers the question as to whether the change occurred server side or client side. Goal: Reduce the time to first byte by improving CDN, network, and service performance. 
Thresholds: Warning > 0.5 seconds Critical > 1.0 seconds According to Google and Search Engine People, 500 milliseconds is a decent TTFB for pages with dynamic content. You can find mention of these recommendations here. Ajax response times Slow ajax calls can make the user feel as though nothing is happening or the page is broken. If the response time is slow enough, users may even abandon the journey. Goal: Measure and improve ajax response times. Thresholds: Warning > 2 seconds Critical > 2.5 seconds These thresholds come from experience with customers across a variety of industries. HTTP error rate HTTP errors (or HTTP 4xx and 5xx responses) happen when calls to the backend are not successful. Goal: Measure and reduce the HTTP error rate to ensure your customers are able to do what they came to your site to do. Thresholds: Warning < 99% of requests are successful Critical < 97% of requests are successful These thresholds come from experience with customers across a variety of industries. We made the assumption that every ajax request is associated with something the user is trying to achieve and treat it accordingly. Because users will often retry failed actions, we allowed for space between warning and critical thresholds. If the ajax requests being measured are an important part of the user journey, we recommended aiming for higher success rates, such as 99.5% or 99.9%. If the ajax requests are tied to login requests, separate 4xx response codes from 5xx response codes and set a much lower threshold for the 4xx responses. You can look to historical response code rates to determine a reasonable threshold. JavaScript error rate This KPI measures the number of JavaScript errors per page view. Goal: Remove irrelevant JavaScript errors being tracked either by tuning ingest or using filtering. Reduce JavaScript errors that impact customer performance. Thresholds: Warning: > 5% errors per page view Critical: > 10% errors per page view These thresholds come from experience with customers across a variety of industries. For each KPI, we defined thresholds - one for warning, another for critical. You might ask where these values come from or how you can be sure they should apply to your application. Our thresholds are the ones recommended by Google (as with Core Web Vitals) or by us, based on our experience across a large number of customers and applications. If you feel strongly that they should be different, you can adjust them, but you should do this at the organizational level rather than on an application by application basis. Quality Foundation helps you identify where in your application you need to make improvements that will optimize user retention, conversion and satisfaction. It is less about where things are and more about where to get to. It also shows you what you should be measuring going forward. You can use this to define service level objectives (SLOs) (in a service level dashboard) and alert on them. 
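Each of the KPIs above is backed by a NRQL query. As one illustration, a minimal Python sketch that pulls the 75th-percentile LCP through NerdGraph; the API key, account ID, and app name are placeholders, and the NRQL mirrors the guide's own page load query:

    import requests

    API_KEY = 'NRAK-...'   # placeholder user API key
    ACCOUNT_ID = 1234567   # placeholder account id

    # p75 LCP over the last week, to compare against the 2.5s / 4.0s thresholds.
    nrql = ("FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) "
            "WHERE appName = 'WebPortal' SINCE 1 week AGO")

    query = """
    query ($accountId: Int!, $nrql: Nrql!) {
      actor { account(id: $accountId) { nrql(query: $nrql) { results } } }
    }
    """

    resp = requests.post(
        'https://api.newrelic.com/graphql',
        headers={'API-Key': API_KEY},
        json={'query': query, 'variables': {'accountId': ACCOUNT_ID, 'nrql': nrql}},
        timeout=30,
    )
    print(resp.json()['data']['actor']['account']['nrql']['results'])

The same shape works for any of the dashboard's KPI queries; only the NRQL string changes.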
Prerequisites Required knowledge Familiarity with synthetic monitoring Familiarity with browser monitoring Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured for login flow Monitors should be configured to test from all regions applicable to your users Monitors should be configured for each domain and each login flow Data retention for browser events greater than or equal to 2x an average sprint Establish current state Review instrumented pages Validate Browser URL grouping Understand how you will segment your data Import the quality foundation dashboard Capture current performance for each dashboard page Review instrumented pages Review Browser apps and pages to make sure that everything you expect to report back to New Relic is. You can do this by reviewing the Page Views tab in the Browser UI or running the following query: SELECT uniques(pageUrl) from PageView LIMIT MAX Copy You may need to filter out URLs that contain request or customer ID. Validate Browser URL grouping Ensure Browser segments are captured correctly so user experience performance is measurable in both the NewRelic UI as well as at the aggregate level when querying via NRQL. A segment is the text between two / in a URL or between . of a domain name. For example, in the URL website.com/product/widget-name, the segments are: website .com product widget-name When there are a lot of URLs with a lot of segments, URLs can get crushed, so that website.com/product/widget-name becomes website.com/ or website.com/product/. In this example, the first crushed URL is not particularly useful, but the second one may be a useful way of aggregating customer experience data for the product. Not sure whether you need to tune your configuration? Import the Segment Allow List Investigation dashboard in GitHub to help. Once you’ve identified which segments to add, you can add them using Segment allow lists in Browser. Understand how you will segment your data Make Customer Experience data understandable and actionable by breaking it out into different segments. In this case, segments refer to groups of data. It does not refer to sections of URLs, as in segment allow lists. Consider the following statements: Most of our users experience 3 seconds or better to first input delay. On average, we see 2 seconds to the largest contentful paint. Last week, there were 1 million page views. Compared to: Most of the users in the US, Canada, and EMEA experience 2 seconds or better to first input delay. Malaysia and Indonesia users experience 4 seconds; we are looking into this. Customers buying car insurance typically see 1 second to largest contentful paint. For home insurance, it’s 4 seconds. Last week, there were 700,000 page views on mobile browser apps compared to 300,000 on desktop. Let’s make sure we’re optimizing our mobile experience. Typical segmentation involves breaking down user experience into the following categories: Segment Guidance Region/Location Basic: Group by country. Browser events automatically contain the country code of requests, so there is nothing you need to do to break it out further. Advanced: Make regional grouping match regional SLO groups by creating your own region attribute using custom attributes in Browser. Facet by countryCode. 
Related attributes: regionCode city asnLatitude asnLongitude Device Break out performance and engagement device type so you can understand: Typical breakdown of desktop vs mobile browser users Experience of desktop vs mobile browser users Facet by deviceType. Related attributes: userAgentName userAgentOS userAgentVersion Product/Line of Business In this scenario, a product is a separate line of business or service provided by your organization. Some examples of industries and respective products: An insurance company that sells both car and house insurance A media company that has multiple streaming services or channels A travel company that provides car rental as well as hotel bookings Basic: Break out performance by product by: Faceting on pageUrl: Use this approach when multiple products are grouped into one browser app in New Relic. Faceting by appName: Use this approach when each product is instrumented as a separate web app. Grouping by appName and then facet: Use this approach when there are multiple apps in browser supporting one product. Advanced: Add product offering as a custom attribute to browser pages using custom attributes. Environment During instrumentation or afterwards, follow a naming convention that specifies the environment in Browser. Well named browser apps specify product and/or function as well as environment. Examples: account-management.prod hotels-book.prod car-insurance.uat Using app naming conventions to specify the environment supports filtering data in both the UI and in dashboards. For more information, see the documentation for how to rename Browser apps. Team In some organizations, a single team supports multiple products, while in others, a product is big enough to be supported by multiple teams. Report on team performance against customer experience or engagement by either adding the team name to the Browser app name (for example, account-management.prod.unicorn-squad) or by using custom attributes. Import the quality foundation dashboard This step creates the dashboard that you will use to measure your customer experience and improve it. Clone the GitHub repository. Follow the GitHub repository README instructions to implement the dashboard. Make sure to align the dashboard to lines of business or customer facing offerings rather than teams. This ensures optimization time is spent where it is most impactful. Capture current performance for each dashboard page Follow the GitHub README instructions. Use the dashboard from the previous step to understand the overall performance for each line of business. If relevant, apply filters to see performance across region or device. If values drop below targets and it matters, add it to the sheet as a candidate for improvement. Not worth tracking: A company that sells insurance in the US only notices poor performance in Malaysia. Worth tracking: A company that sells insurance in the US only notices poor performance with respect to mobile users in the US. Improvement Process Plan your work Decide which KPIs to improve Improve targeted KPIs Improve page load performance Improve AJAX response times Improve the AJAX error rate Improve JavaScript errors Plan your work Whether you have a dedicated initiative to improve performance or classifying as ongoing maintenance, you need to track your progress at the end of every sprint. Decide which KPIs to improve You now know what your user experience looks like across multiple lines of business. Where should you be improving? Start with business priorities. 
If you have clear business directives or have access to a senior manager above who does, you should focus on what matters most to your organization. For example, let’s say your company has recently launched a new initiative around a line of business but the KPIs associated with the UI are below target. This is where you should focus time initially. Next, focus on KPIs for each line of business. Finally, filter each line of business by device, region, etc., to see if additional focus is needed for specific regions or devices. Improve targeted KPIs To track your progress, create a new dashboard or add a new page to the existing dashboard and name it Quality Foundation KPI Improvement. For more information, see Improve Web Uptime. Improve page load performance Narrow your focus to specific pages that aren’t meeting target KPI values. For each page load KPI result that is out of bounds in the Quality Foundation Dashboard, remove the COMPARE WITH clause and add FACET pageUrl/targetGroupedUrl LIMIT MAX to find which pages are the poor performers. Use targetGroupedUrl when there are many results; for example, when the customer ID is part of the URL. Otherwise, use pageUrl. Original Dashboard query: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' SINCE 1 week AGO COMPARE WITH 1 week AGO Copy New query to identify problem pages: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' FACET targetGroupedUrl LIMIT MAX Copy Once you have identified pages to improve, see the guidance in Improve page load performance. Improve AJAX response times Find the slow requests. Go to the Ajax duration widget on the dashboard. View query, then open in query builder. Add facet requestUrl LIMIT MAX to the end of the query. Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - AjaxResponseTimes. Focus improving requests with a timeToSettle > 2.5s. Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips. Improve the AJAX error rate Find the failing requests. Go to Dashboards > Query builder. Enter FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND SINCE 1 week AGO facet pageUrl, appName Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Pages with AjaxErrors. Run the query again for the most problematic pages to find the requests that are failing: FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND pageUrl= AND appName = SINCE 1 week AGO facet requestUrl Copy Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips. Improve JavaScript errors Find the most common failures. Go to Dashboards > Query builder Enter FROM JavaScriptError SELECT count(errorClass) SINCE 1 week AGO WHERE FACET transactionName, errorClass, errorMessage, domain Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Javascript Errors. Use this information to figure out which errors need to be addressed Use New Relic’s recommended best practices to resolve errors that need addressing. See JavaScript errors page: Detect and analyze errors. Remove third party errors that do not add value. You may be using a third party JavaScript that is noisy but works as expected. 
You can take a couple of approaches: Remove the domain name from the JavaScript error/Pageview ratio widget and add it as its own widget so you can see unexpected changes. You can alert on this using Baseline NRQL alerts. Drop the JavaScript error using drop filters. Only use this option if the volume of errors is impacting your data ingest in a significant way. Be as specific as you can in the drop filter. Conclusion Best practices to adopt Revisit performance metrics (shared in this document as Quality Foundation KPIs) at the end of each sprint. Incorporate performance improvements into developer sprints. Openly share metrics with the lines of business you support as well as other internal stakeholders. Define Customer Experience SLOs. Create alerts for business critical drops in Quality Foundation KPIs. Value Realization At the end of this process you should now: Have an understanding of your end user experience in a way that is tangible, actionable, and easy for engineers as well as the business to understand. Know how releases impact your end customers. Know how your customers are impacted by service, infrastructure, or network level events. See latency issues caused by backend services if they exist. Have created, or be on the path to create, a common language with business owners so you are working together. This can open new avenues for recognition and sponsorship for new projects.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 165.16348, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "sections": "HTTP error rate", - "body": " Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured" - }, - "id": "61461531e7b9d25774b6f22d" - }, { "sections": [ "Handle sites with authentication", @@ -72986,7 +72851,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 162.2758, + "_score": 162.43059, "_version": null, "_explanation": null, "sort": null, @@ -73020,7 +72885,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 161.33522, + "_score": 160.74484, "_version": null, "_explanation": null, "sort": null, @@ -73031,6 +72896,69 @@ "body": " monitor API name: SCRIPT_API Provide a url and this monitor will test all the links on the page for success. If a failure is detected you can view the individual non-successful links that caused the failure.
Certificate check monitor API name: SCRIPT_API Proactively ping your domain certificates based" }, "id": "603e873864441f3e154e888f" + }, + { + "sections": [ + "Quality foundation implementation guide", + "Overview", + "Desired Outcome", + "Key Performance Indicators", + "Availability", + "Largest contentful paint (LCP)", + "First input delay (FID)", + "Cumulative layout shift (CLS)", + "Time to first byte (TTFB)", + "Ajax response times", + "HTTP error rate", + "JavaScript error rate", + "Prerequisites", + "Required knowledge", + "Required Installation and Configuration", + "Establish current state", + "Review instrumented pages", + "Validate Browser URL grouping", + "Understand how you will segment your data", + "Import the quality foundation dashboard", + "Capture current performance for each dashboard page", + "Improvement Process", + "Plan your work", + "Decide which KPIs to improve", + "Improve targeted KPIs", + "Improve page load performance", + "Improve AJAX response times", + "Improve the AJAX error rate", + "Improve JavaScript errors", + "Conclusion" + ], + "title": "Quality foundation implementation guide", + "type": "docs", + "tags": [ + "Quality Foundation", + "Implementation guide", + "Digital customer experience", + "Customer experience", + "Observability maturity" + ], + "external_id": "91186ed56e33e040c73d1fff940cec0644c199f6", + "image": "https://docs.newrelic.com/static/9238160720501f4423dff703746fb59d/d9199/cx-what-you-can-measure-nr.png", + "url": "https://docs.newrelic.com/docs/new-relic-solutions/observability-maturity/customer-experience/quality-foundation-implementation-guide/", + "published_at": "2022-02-15T12:55:51Z", + "updated_at": "2022-02-15T12:55:51Z", + "document_type": "page", + "popularity": 1, + "body": "Overview Digital customer experience is your end user’s experience across all your digital touch points. There are four core factors that impact a user’s experience: Availability (Is it reachable?) Performance (Does it perform well enough to be usable?) Content quality (Does it have what users need and can they find it?) Product and content relevance (Does it have what users care about?) Digital customer experience includes web, mobile, and IoT. The first version of this guide is focused on measuring the end user web experience. Quality Foundation is about creating a standard practice to help you understand your digital customer experience in a meaningful way. This implementation guide will help you: Look at customer experience in relation to: Global functions, such as search and login Lines of business Regions Report back to business stakeholders on what they care about Prioritize what you work on Create a repeatable practice Desired Outcome Improve customer engagement and retention by measuring and improving performance in a way that better aligns to the end user experience. Key Performance Indicators Quality Foundation measures the following KPIs: Availability This KPI measures whether or not your application or its pages can be accessed by your users Goal: Improve uptime and availability Thresholds: < 99% warning < 95% critical 99% or \"2 9's\" is a good minimum standard of availability, even for employee applications or sub-pages. We configure these default thresholds into the dashboards. You can easily change this to better suit expectations for your application. Largest contentful paint (LCP) Part of Core Web Vitals. Largest Contentful Paint (LCP) measures the time it takes to load the largest image after a user has navigated to a new page.
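A reference query for this KPI, as a sketch only (it reuses the same PageViewTiming attribute and percentile pattern that the improvement queries later in this guide are built on): FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) SINCE 1 week AGO Copy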
Goal: Reduce LCP to 2.5 seconds or better for the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 2.5 seconds Critical: > 4.0 seconds LCP thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. First input delay (FID) Part of Core Web Vitals. Measures the interactivity of a page by tracking the time between user interaction (such as clicking a link or entering text) and when the browser begins processing the event. Goal: Reduce FID to 100 milliseconds or better for the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 100 milliseconds Critical: > 300 milliseconds FID thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Cumulative layout shift (CLS) Part of Core Web Vitals. Measures how much the page layout shifts during render. Goal: Maintain a score of 0.1 or less for the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 0.1 score Critical: > 0.25 score CLS thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Time to first byte (TTFB) This KPI measures the time from navigation start (a user clicking a link) to the browser receiving the first byte of the response from the server. Google considers TTFB secondary to Core Web Vitals. We recommend measuring it for a more complete picture. It can be revealing if you see a change in LCP, because it answers the question as to whether the change occurred server side or client side. Goal: Reduce the time to first byte by improving CDN, network, and service performance. Thresholds: Warning > 0.5 seconds Critical > 1.0 seconds According to Google and Search Engine People, 500 milliseconds is a decent TTFB for pages with dynamic content. You can find mention of these recommendations here. Ajax response times Slow ajax calls can make the user feel as though nothing is happening or the page is broken. If the response time is slow enough, users may even abandon the journey. Goal: Measure and improve ajax response times. Thresholds: Warning > 2 seconds Critical > 2.5 seconds These thresholds come from experience with customers across a variety of industries. HTTP error rate HTTP errors (or HTTP 4xx and 5xx responses) happen when calls to the backend are not successful. Goal: Measure and reduce the HTTP error rate to ensure your customers are able to do what they came to your site to do. Thresholds: Warning < 99% of requests are successful Critical < 97% of requests are successful These thresholds come from experience with customers across a variety of industries. We made the assumption that every ajax request is associated with something the user is trying to achieve and treated it accordingly. Because users will often retry failed actions, we allowed for space between warning and critical thresholds. If the ajax requests being measured are an important part of the user journey, we recommend aiming for higher success rates, such as 99.5% or 99.9%. If the ajax requests are tied to login requests, separate 4xx response codes from 5xx response codes and set a much lower threshold for the 4xx responses. You can look to historical response code rates to determine a reasonable threshold. JavaScript error rate This KPI measures the number of JavaScript errors per page view.
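One way to approximate this rate in NRQL, as a sketch (it assumes both JavaScriptError and PageView events are reporting, which the prerequisites below require): FROM JavaScriptError, PageView SELECT filter(count(*), WHERE eventType() = 'JavaScriptError') / filter(count(*), WHERE eventType() = 'PageView') SINCE 1 week AGO Copy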
Goal: Remove irrelevant JavaScript errors being tracked either by tuning ingest or using filtering. Reduce JavaScript errors that impact customer performance. Thresholds: Warning: > 5% errors per page view Critical: > 10% errors per page view These thresholds come from experience with customers across a variety of industries. For each KPI, we defined thresholds: one for warning, another for critical. You might ask where these values come from or how you can be sure they should apply to your application. Our thresholds are the ones recommended by Google (as with Core Web Vitals) or by us, based on our experience across a large number of customers and applications. If you feel strongly that they should be different, you can adjust them, but you should do this at the organizational level rather than on an application by application basis. Quality Foundation helps you identify where in your application you need to make improvements that will optimize user retention, conversion, and satisfaction. It is less about where things are and more about where to get to. It also shows you what you should be measuring going forward. You can use this to define service level objectives (SLOs) (in a service level dashboard) and alert on them. Prerequisites Required knowledge Familiarity with synthetic monitoring Familiarity with browser monitoring Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured for login flow Monitors should be configured to test from all regions applicable to your users Monitors should be configured for each domain and each login flow Data retention for browser events greater than or equal to 2x an average sprint Establish current state Review instrumented pages Validate Browser URL grouping Understand how you will segment your data Import the quality foundation dashboard Capture current performance for each dashboard page Review instrumented pages Review Browser apps and pages to make sure that everything you expect to report back to New Relic is doing so. You can do this by reviewing the Page Views tab in the Browser UI or running the following query: SELECT uniques(pageUrl) from PageView LIMIT MAX Copy You may need to filter out URLs that contain request or customer ID. Validate Browser URL grouping Ensure Browser segments are captured correctly so user experience performance is measurable in both the New Relic UI and at the aggregate level when querying via NRQL. A segment is the text between two / in a URL or between . of a domain name. For example, in the URL website.com/product/widget-name, the segments are: website .com product widget-name When there are a lot of URLs with a lot of segments, URLs can get crushed, so that website.com/product/widget-name becomes website.com/ or website.com/product/. In this example, the first crushed URL is not particularly useful, but the second one may be a useful way of aggregating customer experience data for the product. Not sure whether you need to tune your configuration? Import the Segment Allow List Investigation dashboard in GitHub to help. Once you’ve identified which segments to add, you can add them using Segment allow lists in Browser. Understand how you will segment your data Make Customer Experience data understandable and actionable by breaking it out into different segments.
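For example, a sketch of a segmented query (countryCode and deviceType are the same Browser attributes the guidance below facets on): FROM PageView SELECT count(*) FACET countryCode, deviceType SINCE 1 week AGO Copy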
In this case, segments refer to groups of data. They do not refer to sections of URLs, as in segment allow lists. Consider the following statements: Most of our users experience 3 seconds or better to first input delay. On average, we see 2 seconds to the largest contentful paint. Last week, there were 1 million page views. Compared to: Most of the users in the US, Canada, and EMEA experience 2 seconds or better to first input delay. Malaysia and Indonesia users experience 4 seconds; we are looking into this. Customers buying car insurance typically see 1 second to largest contentful paint. For home insurance, it’s 4 seconds. Last week, there were 700,000 page views on mobile browser apps compared to 300,000 on desktop. Let’s make sure we’re optimizing our mobile experience. Typical segmentation involves breaking down user experience into the following categories: Segment Guidance Region/Location Basic: Group by country. Browser events automatically contain the country code of requests, so there is nothing you need to do to break it out further. Advanced: Make regional grouping match regional SLO groups by creating your own region attribute using custom attributes in Browser. Facet by countryCode. Related attributes: regionCode city asnLatitude asnLongitude Device Break out performance and engagement by device type so you can understand: Typical breakdown of desktop vs mobile browser users Experience of desktop vs mobile browser users Facet by deviceType. Related attributes: userAgentName userAgentOS userAgentVersion Product/Line of Business In this scenario, a product is a separate line of business or service provided by your organization. Some examples of industries and respective products: An insurance company that sells both car and house insurance A media company that has multiple streaming services or channels A travel company that provides car rental as well as hotel bookings Basic: Break out performance by product by: Faceting on pageUrl: Use this approach when multiple products are grouped into one browser app in New Relic. Faceting by appName: Use this approach when each product is instrumented as a separate web app. Grouping by appName and then faceting: Use this approach when there are multiple apps in browser supporting one product. Advanced: Add product offering as a custom attribute to browser pages using custom attributes. Environment During instrumentation or afterwards, follow a naming convention that specifies the environment in Browser. Well-named browser apps specify product and/or function as well as environment. Examples: account-management.prod hotels-book.prod car-insurance.uat Using app naming conventions to specify the environment supports filtering data in both the UI and in dashboards. For more information, see the documentation for how to rename Browser apps. Team In some organizations, a single team supports multiple products, while in others, a product is big enough to be supported by multiple teams. Report on team performance against customer experience or engagement by either adding the team name to the Browser app name (for example, account-management.prod.unicorn-squad) or by using custom attributes. Import the quality foundation dashboard This step creates the dashboard that you will use to measure your customer experience and improve it. Clone the GitHub repository. Follow the GitHub repository README instructions to implement the dashboard. Make sure to align the dashboard to lines of business or customer-facing offerings rather than teams.
This ensures optimization time is spent where it is most impactful. Capture current performance for each dashboard page Follow the GitHub README instructions. Use the dashboard from the previous step to understand the overall performance for each line of business. If relevant, apply filters to see performance across region or device. If values drop below targets and it matters, add it to the sheet as a candidate for improvement. Not worth tracking: A company that sells insurance in the US only notices poor performance in Malaysia. Worth tracking: A company that sells insurance in the US only notices poor performance with respect to mobile users in the US. Improvement Process Plan your work Decide which KPIs to improve Improve targeted KPIs Improve page load performance Improve AJAX response times Improve the AJAX error rate Improve JavaScript errors Plan your work Whether you have a dedicated initiative to improve performance or classify it as ongoing maintenance, you need to track your progress at the end of every sprint. Decide which KPIs to improve You now know what your user experience looks like across multiple lines of business. Where should you be improving? Start with business priorities. If you have clear business directives or have access to a senior manager above who does, you should focus on what matters most to your organization. For example, let’s say your company has recently launched a new initiative around a line of business but the KPIs associated with the UI are below target. This is where you should focus time initially. Next, focus on KPIs for each line of business. Finally, filter each line of business by device, region, etc., to see if additional focus is needed for specific regions or devices. Improve targeted KPIs To track your progress, create a new dashboard or add a new page to the existing dashboard and name it Quality Foundation KPI Improvement. For more information, see Improve Web Uptime. Improve page load performance Narrow your focus to specific pages that aren’t meeting target KPI values. For each page load KPI result that is out of bounds in the Quality Foundation Dashboard, remove the COMPARE WITH clause and add FACET pageUrl/targetGroupedUrl LIMIT MAX to find which pages are the poor performers. Use targetGroupedUrl when there are many results; for example, when the customer ID is part of the URL. Otherwise, use pageUrl. Original Dashboard query: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' SINCE 1 week AGO COMPARE WITH 1 week AGO Copy New query to identify problem pages: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' FACET targetGroupedUrl LIMIT MAX Copy Once you have identified pages to improve, see the guidance in Improve page load performance. Improve AJAX response times Find the slow requests. Go to the Ajax duration widget on the dashboard. View query, then open in query builder. Add facet requestUrl LIMIT MAX to the end of the query. Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - AjaxResponseTimes. Focus on improving requests with a timeToSettle > 2.5s. Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips. Improve the AJAX error rate Find the failing requests. Go to Dashboards > Query builder.
Enter FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND SINCE 1 week AGO facet pageUrl, appName Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Pages with AjaxErrors. Run the query again for the most problematic pages to find the requests that are failing: FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND pageUrl= AND appName = SINCE 1 week AGO facet requestUrl Copy Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips. Improve JavaScript errors Find the most common failures. Go to Dashboards > Query builder. Enter FROM JavaScriptError SELECT count(errorClass) SINCE 1 week AGO WHERE FACET transactionName, errorClass, errorMessage, domain Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Javascript Errors. Use this information to figure out which errors need to be addressed. Use New Relic’s recommended best practices to resolve errors that need addressing. See JavaScript errors page: Detect and analyze errors. Remove third party errors that do not add value. You may be using a third party JavaScript that is noisy but works as expected. You can take a couple of approaches: Remove the domain name from the JavaScript error/Pageview ratio widget and add it as its own widget so you can see unexpected changes. You can alert on this using Baseline NRQL alerts. Drop the JavaScript error using drop filters. Only use this option if the volume of errors is impacting your data ingest in a significant way. Be as specific as you can in the drop filter. Conclusion Best practices to adopt Revisit performance metrics (shared in this document as Quality Foundation KPIs) at the end of each sprint. Incorporate performance improvements into developer sprints. Openly share metrics with the lines of business you support as well as other internal stakeholders. Define Customer Experience SLOs. Create alerts for business critical drops in Quality Foundation KPIs. Value Realization At the end of this process you should now: Have an understanding of your end user experience in a way that is tangible, actionable, and easy for engineers as well as the business to understand. Know how releases impact your end customers. Know how your customers are impacted by service, infrastructure, or network level events. See latency issues caused by backend services if they exist. Have created, or be on the path to create, a common language with business owners so you are working together.
This can open new avenues for recognition and sponsorship for new projects.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 154.23795, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "HTTP error rate", + "body": " Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured" + }, + "id": "61461531e7b9d25774b6f22d" } ], "/httprb/18adf8b8-f2d2-47a5-b8a2-278f526b5243": [ @@ -73066,7 +72994,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73107,7 +73035,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73149,7 +73077,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73191,7 +73119,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73233,7 +73161,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73277,7 +73205,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73318,7 +73246,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73360,7 +73288,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73402,7 +73330,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73444,7 +73372,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -73485,7 +73413,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.6063, + "_score": 92.56492, "_version": null, "_explanation": null, "sort": null, @@ -73547,7 +73475,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.729675, + "_score": 88.04499, "_version": null, "_explanation": null, "sort": null, @@ -73558,41 +73486,6 @@ }, "id": "6072a66564441fb28e9d8595" }, - { - "sections": [ - "Troubleshoot enabling serverless monitoring of AWS Lambda", - "Problem", - "Solution", - "Recommended: Attach your CloudWatch logs to the ticket", - "Important" - ], - "title": "Troubleshoot enabling serverless monitoring of 
AWS Lambda", - "type": "docs", - "tags": [ - "Troubleshooting", - "AWS Lambda monitoring", - "Serverless function monitoring" - ], - "external_id": "73f864add78be5efb2429485506dc5a679a9820e", - "image": "", - "url": "https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/troubleshooting/troubleshoot-enabling-serverless-monitoring-aws-lambda/", - "published_at": "2022-02-15T17:56:10Z", - "updated_at": "2022-02-15T17:56:09Z", - "document_type": "troubleshooting_doc", - "popularity": 1, - "body": "Problem You're attempting to enable serverless monitoring for AWS Lambda and are having an issue or error. Solution There are two common problems related to not completing all of the enablement procedures: Not seeing data on CloudWatch metrics UI page. This means the AWS integration step wasn't completed. Not seeing data on Troubleshooting category UI pages. If you aren't seeing data on the Distributed tracing, Errors, and Invocations UI tabs, this means the APM agent instrumentation step wasn't completed. Besides these basic enablement problems, there are some additional problems that may cause an issue: CloudWatch error \"HTTP error 401: unauthorized.\" This is due to an incorrect API Key. The --nr-api-keyargument in the Configure AWS enable step takes your user key, which is different from the REST API key. Custom metrics are missing. Lambda monitoring is not compatible with our custom metrics. Use custom attributes to add metadata. Invocations missing. To see invocation breakdown details, distributed tracing must be enabled as part of the Lambda instrumentation step. Distributed tracing is required so that span details can be displayed in the invocation details pane. You've completed the installation, integration, and instrumentation steps correctly, and your function is sending logs to CloudWatch but you're not seeing traces for specific dependencies (or any traces) in the UI. This may result from the order of layer merging (if you're using our Lambda layers) or from the order of import (if you're instrumenting manually): If you're instrumenting with layers: make sure in your function configuration that the New Relic layer is merged before other layers (though if your function uses webpack, the New Relic layer should be merged after the webpack layer). If you're instrumenting a Node function manually, make sure that logging is enabled, and that your function imports newrelic before it imports any dependencies you expect to monitor. If none of these solutions help you, contact our support team. The following information will help you when you talk to support technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? Recommended: Attach your CloudWatch logs to the ticket To provide our support team with logging information when opening a ticket: Invoke the function in AWS Lambda. Click on the logs link after your function runs. This will take you to the CloudWatch logs in AWS. On the left-hand sidebar in AWS, under Logs, click on Insights. Select your function and also the newrelic-log-ingestion stream. Apply an appropriate Time Filter, and a log entry limit (the default of 20 may not be enough). Under Actions select Copy query results (ASCII). Paste the copied text into a new text file, then save and upload the text file to the ticket. 
Important The NR_LAMBDA_MONITORING payload contains all the information the agent attempts to send up, including metrics, events, some AWS account metadata, invocations and errors data. Note that some of that data (for example, our legacy metrics) will not make it to our UI because our ingest pipeline does not consume them.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.002975, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? Recommended: Attach your CloudWatch" - }, - "id": "603ea6bb64441f85284e889b" - }, { "sections": [ "Net::HTTP", @@ -73625,7 +73518,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.074814, "_version": null, "_explanation": null, "sort": null, @@ -73666,7 +73559,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.074814, "_version": null, "_explanation": null, "sort": null, @@ -73675,6 +73568,48 @@ "body": " applications in Ruby. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Rack with the New Relic Ruby agent, and allows you to further leverage New Relic's APM capabilities by setting up custom dashboards , errors inbox" }, "id": "6156684728ccbc1bf0f2141d" + }, + { + "sections": [ + "Rainbows!", + "What's included", + "Rainbows", + "High CPU Utilization", + "Memory Usage", + "Apdex Score", + "Transaction Errors", + "Rainbows! installation docs", + "What is Rainbows!?", + "Get started!", + "More info" + ], + "title": "Rainbows!", + "type": "quickstarts", + "tags": [ + "apm", + "ruby" + ], + "quick_start_name": "Rainbows!", + "external_id": "7ea83201bb00d2d293339aaae5d60882c019916c", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/rainbows/5dd54f65-c84d-4830-8432-9a0be7d30d1b/", + "published_at": "2022-02-08T01:39:53Z", + "updated_at": "2021-11-30T01:47:44Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Rainbows Dashboard High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Memory Usage This alert is triggered when Memory usage is above 90% Alert Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert Rainbows! installation docs An HTTP Rack app server designed to handle applications that expect long request/response times and/or slow clients. Doc What is Rainbows!? An HTTP Rack app server designed to handle applications that expect long request/response times and/or slow clients. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Rainbows! with the New Relic Ruby agent, and allows you to further leverage New Relic's APM capabilities by setting up custom dashboards , errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for Rainbows!. Rainbows! 
quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Rainbows Rainbows! observability quickstart contains 4 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90% Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Rainbows! observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Rainbows! installation docs An HTTP Rack app server designed to handle applications that expect long request/response times and/or slow clients. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 84.074814, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "apm", + "body": " server designed to handle applications that expect long request/response times and/or slow clients. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Rainbows! with the New Relic Ruby agent, and allows you to further" + }, + "id": "6156689428ccbc41dcf2143a" + } ], "/odbc/f6728c42-38bb-4e23-b78f-298555640b98": [ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.21301, "_version": null, "_explanation": null, "sort": null, @@ -73751,7 +73686,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -73793,7 +73728,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -73835,7 +73770,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.2128, "_version": null, "_explanation": null, "sort": null, @@ -73874,7 +73809,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73055, "_version": null, "_explanation": null, "sort": null, @@ -73923,7 +73858,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.161606, + "_score": 80.604774, "_version": null, "_explanation": null, "sort": null, @@ -73965,7 +73900,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.0705, + "_score": 68.37392, "_version": null, "_explanation": null, "sort": null, @@ -74016,7 +73951,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 69.895935, + "_score": 65.3901, "_version": null, "_explanation": null, "sort": null, @@ -74096,7 +74031,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.35594, + "_score": 65.11074,
"_version": null, "_explanation": null, "sort": null, @@ -74151,7 +74086,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 60.366714, + "_score": 59.388126, "_version": null, "_explanation": null, "sort": null, @@ -74198,7 +74133,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 49.545578, + "_score": 48.534103, "_version": null, "_explanation": null, "sort": null, @@ -74230,7 +74165,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 40.142944, + "_score": 40.02409, "_version": null, "_explanation": null, "sort": null, @@ -74275,7 +74210,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 33.13157, + "_score": 32.82866, "_version": null, "_explanation": null, "sort": null, @@ -74320,7 +74255,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -74361,7 +74296,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -74403,7 +74338,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -74445,7 +74380,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -74487,7 +74422,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -74525,7 +74460,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 203.00626, + "_score": 189.37022, "_version": null, "_explanation": null, "sort": null, @@ -74581,7 +74516,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.87093, + "_score": 165.793, "_version": null, "_explanation": null, "sort": null, @@ -74645,7 +74580,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 146.4, + "_score": 136.64267, "_version": null, "_explanation": null, "sort": null, @@ -74681,7 +74616,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 133.54099, + "_score": 126.08999, "_version": null, "_explanation": null, "sort": null, @@ -74722,7 +74657,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.887024, + "_score": 114.99555, "_version": null, "_explanation": null, "sort": null, @@ -74768,7 +74703,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.80652, + "_score": 82.43481, "_version": null, "_explanation": null, "sort": null, @@ -74810,7 +74745,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.889145, + "_score": 82.04414, "_version": null, "_explanation": null, "sort": null, @@ -74822,49 +74757,6 @@ }, "id": "603ebe19e7b9d2b9342a07c9" }, - { - "sections": [ - "Azure Cosmos DB (Document DB) monitoring 
integration", - "Features", - "Activate integration", - "Configuration and polling", - "View and query data", - "Metric data", - "Important", - "Account Data", - "DataBase Data", - "Collection Data", - "Inventory data", - "EOL NOTICE" - ], - "title": "Azure Cosmos DB (Document DB) monitoring integration", - "type": "docs", - "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" - ], - "external_id": "e4bb0ee9204d3af8c336f3bccd58052df2451116", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-cosmos-db-document-db-monitoring-integration/", - "published_at": "2022-02-15T19:28:51Z", - "updated_at": "2022-02-15T19:28:51Z", - "document_type": "page", - "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure's Cosmos DB service that reports your Cosmos DB metrics and other data to New Relic. This document explains how to activate the Cosmos DB integration and describes the data that can be captured. Features New Relic gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. Activate integration To enable this integration follow standard procedures to activate your Azure service in New Relic. The Cosmos DB integration requires you to create an additional role and permission to fetch database and collection data: Go to the Azure Portal and open a shell by selecting the terminal icon. Add the following command: az role definition create --role-definition '{ \"Name\": \"NewRelic Integrations\", \"Actions\": [ \"*/read\", \"Microsoft.DocumentDB/databaseAccounts/listKeys/action\" ], \"NotActions\": [], \"AssignableScopes\": [ \"/subscriptions/YOUR_INSERT_SUBSCRIPTION_ID\" ], \"Description\": \"Read Only for NewRelic Integrations\", \"IsCustom\": \"true\" }' Copy From Services > Subscriptions, select the subscription, go to Access control (IAM), and then select Add. In the Role search box, add the name of the newly created role definition (for example, NewRelic Integrations). In the Select search box, add the name of the New Relic integration application, and select it. Ensure that the application is added to the Selected members list, then Save. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Cosmos DB integration: Polling interval: 5 minutes Resolution: 1 minute or 5 minutes, varies by metric. For resolution information on a specific metric, see Microsoft Azure's documentation about support metrics. View and query data To view your integration data, go to one.newrelic.com > Infrastructure > Azure and select the Cosmos DB Integration. You can query and explore your data using the following event types: Entity Event Type Provider Account AzureCosmosDbAccountSample AzureCosmosDbAccount Database AzureCosmosDbDatabaseSample AzureCosmosDbDatabase Collection AzureCosmosDbCollectionSample AzureCosmosDbCollection For more on how to find and use data, see Understand and use integration data. Metric data Important For information on deprecated Cosmos DB events or metrics, see Azure Cosmos DB integration (deprecated). We strongly recommend migrating to the supported events and metrics in this document. To view metrics reported by the Cosmos DB integration, query the Entities below. 
Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. Account Data Metric Description Metadata totalRequests Total number of requests. account kind region offerType statusCode resourceGroup metadataRequests Count of metadata requests. account kind region offerType statusCode resourceGroup mongoRequests Count of Mongo requests made. account kind region commandName offerType errorCode resourceGroup mongoRequestCharge Total number of Mongo request units consumed. account kind region commandName offerType errorCode resourceGroup totalRequestUnits Total number of request units consumed. account kind region offerType statusCode resourceGroup provisionedThroughput Throughput provisioned for the database or collection. account offerType kind resourceGroup availableStorageBytes Total available storage, in bytes. account kind offerType region resourceGroup dataUsageBytes Total data usage reported, in bytes. account kind offerType region resourceGroup indexUsageBytes Total index usage reported, in bytes. account kind offerType region resourceGroup documentQuotaBytes Total storage quota reported, in bytes. account kind offerType region resourceGroup documentCount Total document count reported. account kind offerType region resourceGroup ReplicationLatency P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account kind sourceRegion offerType targetRegion resourceGroup ServiceAvailability Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraRequestCharges Total number of request units consumed for Cassandra requests. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraConnectionClosures Total number of Cassandra connections that were closed. account kind closureReason offerType region resourceGroup DataBase Data Metric Description Metadata totalRequests Total number of requests. account databaseName region statusCode metadataRequests Count of metadata requests. account databaseName region statusCode mongoRequests Count of Mongo requests made. account databaseName region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account databaseName region commandName errorCode totalRequestUnits Total number of request units consumed. account databaseName region statusCode provisionedThroughput Throughput provisioned for the database or collection. account databaseName availableStorageBytes Total available storage, in bytes. account databaseName region dataUsageBytes Total data usage reported, in bytes. account databaseName region indexUsageBytes Total index usage reported, in bytes. account databaseName region documentQuotaBytes Total storage quota reported, in bytes. account databaseName region documentCount Total document count reported. account databaseName region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. 
account databaseName errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account databaseName errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account closureReason region Collection Data Metric Description Metadata totalRequests Total number of requests. account collectionName database region statusCode metadataRequests Count of metadata requests. account collectionName database region statusCode mongoRequests Count of Mongo requests made. account collectionName database region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account collectionName database region commandName errorCode totalRequestUnits Total number of request units consumed. account collectionName database region statusCode provisionedThroughput Throughput provisioned for the database or collection. account collectionName database availableStorageBytes Total available storage, in bytes. account collectionName database region dataUsageBytes Total data usage reported, in bytes. account collectionName database region indexUsageBytes Total index usage reported, in bytes. account collectionName database region documentQuotaBytes Total storage quota reported, in bytes. account collectionName database region documentCount Total document count reported. account collectionName database region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account collectionName sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made.
account collectionName database errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account collectionName database errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account collectionName closureReason region Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. For details on how to find and use inventory data, see Understand and use data. The Cosmos DB integration reports the inventory data for the entity type azure/cosmosdb/account/ using the following metadata: documentEndpoint: The document end point. databaseAccountOfferType: The database account offer type. consistencyPolicy: The consistency policy for the Cosmos DB database account. defaultConsistencyLevel: The default consistency level for the Cosmos DB database account. kind: The type of database account set at database account creation. resourceGroupName: The Azure resource group name that the Cosmos DB database account belong to. regionName: The region name in which the Azure DocumentDB database account is deployed. type: The azure resource type, which is Microsoft.DocumentDB/databaseAccounts.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.60828, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "sections": "DataBase Data", - "body": " gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. Activate integration To enable this integration follow standard procedures" - }, - "id": "617dc763e7b9d2d3dac0580e" - }, { "sections": [ "Gearman", @@ -74896,7 +74788,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56332, "_version": null, "_explanation": null, "sort": null, @@ -74938,7 +74830,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50425, "_version": null, "_explanation": null, "sort": null, @@ -74948,6 +74840,48 @@ "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. 
Alert Transaction Errors This alert is triggered when the transactions fail more than 10" }, "id": "61566c4c64441fc860099644" }, + { + "sections": [ + "Pika", + "What's included", + "Python", + "Apdex Score", + "High CPU Utilization", + "Transaction Errors", + "Pika installation docs", + "What is Pika?", + "Get started!", + "More info" + ], + "title": "Pika", + "type": "quickstarts", + "tags": [ + "apm", + "python" + ], + "quick_start_name": "Pika", + "external_id": "3f7e4155b883dfe528f42a1d3d74ef2a9b249c78", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/python/pika/dashboards/python.png", + "url": "https://developer.newrelic.com/instant-observability/pika/7bdea1cd-d723-4655-ae9f-c13468d593b7/", + "published_at": "2022-02-05T01:43:08Z", + "updated_at": "2021-10-29T01:50:34Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Alert Pika installation docs Pika is a fully featured, dynamic programming language. Doc What is Pika? Pika is a fully featured, dynamic programming language. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for Pika. Pika quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Python Python Pika observability quickstart contains 3 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Pika observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Pika installation docs Pika is a fully featured, dynamic programming language. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 81.50405, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Python", + "tags": "apm", + "body": " automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps .
More info Check out" + }, + "id": "61566846e7b9d2ef508de398" } ], "/httplib2/6bab5c37-1444-4d47-a87a-80edbe316c46": [ @@ -74982,7 +74916,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -75024,7 +74958,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -75066,7 +75000,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -75108,7 +75042,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -75150,7 +75084,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -75195,7 +75129,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75236,7 +75170,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75278,7 +75212,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75320,7 +75254,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75362,7 +75296,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75400,7 +75334,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.51752, + "_score": 97.32644, "_version": null, "_explanation": null, "sort": null, @@ -75437,7 +75371,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.04886, + "_score": 78.45167, "_version": null, "_explanation": null, "sort": null, @@ -75485,7 +75419,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 76.12233, + "_score": 76.115105, "_version": null, "_explanation": null, "sort": null, @@ -75543,7 +75477,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 72.81366, + "_score": 72.741165, "_version": null, "_explanation": null, "sort": null, @@ -75589,7 +75523,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 65.93267, + "_score": 66.07501, "_version": null, "_explanation": null, "sort": null, @@ -75627,7 +75561,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 267.969, + "_score": 249.96114, "_version": null, "_explanation": null, "sort": null, @@ -75683,7 +75617,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
"520d1d5d14cc8a32e600034c", - "_score": 176.43738, + "_score": 164.44989, "_version": null, "_explanation": null, "sort": null, @@ -75694,69 +75628,6 @@ }, "id": "60452628196a679380960f32" }, - { - "sections": [ - "Quality foundation implementation guide", - "Overview", - "Desired Outcome", - "Key Performance Indicators", - "Availability", - "Largest contentful paint (LCP)", - "First input delay (FID)", - "Cumulative layout shift (CLS)", - "Time to first byte (TTFB)", - "Ajax response times", - "HTTP error rate", - "JavaScript error rate", - "Prerequisites", - "Required knowledge", - "Required Installation and Configuration", - "Establish current state", - "Review instrumented pages", - "Validate Browser URL grouping", - "Understand how you will segment your data", - "Import the quality foundation dashboard", - "Capture current performance for each dashboard page", - "Improvement Process", - "Plan your work", - "Decide which KPIs to improve", - "Improve targeted KPIs", - "Improve page load performance", - "Improve AJAX response times", - "Improve the AJAX error rate", - "Improve JavaScript errors", - "Conclusion" - ], - "title": "Quality foundation implementation guide", - "type": "docs", - "tags": [ - "Quality Foundation", - "Implementation guide", - "Digital customer experience", - "Customer experience", - "Observability maturity" - ], - "external_id": "91186ed56e33e040c73d1fff940cec0644c199f6", - "image": "https://docs.newrelic.com/static/9238160720501f4423dff703746fb59d/d9199/cx-what-you-can-measure-nr.png", - "url": "https://docs.newrelic.com/docs/new-relic-solutions/observability-maturity/customer-experience/quality-foundation-implementation-guide/", - "published_at": "2022-02-15T12:55:51Z", - "updated_at": "2022-02-15T12:55:51Z", - "document_type": "page", - "popularity": 1, - "body": "Overview Digital customer experience is your end user’s experience across all your digital touch points. There are four core factors that impact a user’s experience: Availability (Is it reachable?) Performance (Does it perform well enough to be usable?) Content quality (Does it have what users need and can they find it?) Product and content relevance (Does it have what users care about?) Digital customer experience includes web, mobile, and IoT. The first version of this guide is focused on measuring the end user web experience. Quality Foundation is about creating a standard practice to help you understand your digital customer experience in a meaningful way. This implementation guide will help you: Look at customer experience in relation to: Global functions, such as search and login Lines of business Regions Report back to business stakeholders on what they care about Prioritize what you work on Create a repeatable practice Desired Outcome Improve customer engagement and retention by measuring and improving performance in a way that better aligns to the end user experience. Key Performance Indicators Quality Foundation measures the following KPIs: Availability This KPI measures whether or not your application or its pages can be accessed by your users Goal: Improve uptime and availablity Thresholds: < 99% warning < 95% critical 99% or \"2 9's\" is a good minimum standard of availability, even for employee applications or sub-pages. We configure these default thresholds into the dashboards. You can easily change this to better suit expectations for your application. Largest contentful paint (LCP) Part of Core Web Vitals. 
Largest Contentful Paint (LCP) measures the time it takes to load the largest image or text block after a user has navigated to a new page. Goal: Reduce LCP to 2.5 seconds or better at the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 2.5 seconds Critical: > 4.0 seconds LCP thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. First input delay (FID) Part of Core Web Vitals. Measures the interactivity of a page by tracking the time between user interaction (such as clicking a link or entering text) and when the browser begins processing the event. Goal: Reduce FID to 100 milliseconds or better at the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 100 milliseconds Critical: > 300 milliseconds FID thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Cumulative layout shift (CLS) Part of Core Web Vitals. Measures how much the page layout shifts during render. Goal: Maintain a score of 0.1 or less at the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 0.1 score Critical: > 0.25 score CLS thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Time to first byte (TTFB) This KPI measures the time from navigation start (a user clicking a link) to the browser receiving the first byte of the response from the server. Google considers TTFB secondary to Core Web Vitals. We recommend measuring it for a more complete picture. It can be revealing if you see a change in LCP, because it answers the question as to whether the change occurred server side or client side. Goal: Reduce the time to first byte by improving CDN, network, and service performance. Thresholds: Warning > 0.5 seconds Critical > 1.0 seconds According to Google and Search Engine People, 500 milliseconds is a decent TTFB for pages with dynamic content. You can find mention of these recommendations here. Ajax response times Slow ajax calls can make the user feel as though nothing is happening or the page is broken. If the response time is slow enough, users may even abandon the journey. Goal: Measure and improve ajax response times. Thresholds: Warning > 2 seconds Critical > 2.5 seconds These thresholds come from experience with customers across a variety of industries. HTTP error rate HTTP errors (or HTTP 4xx and 5xx responses) happen when calls to the backend are not successful. Goal: Measure and reduce the HTTP error rate to ensure your customers are able to do what they came to your site to do. Thresholds: Warning < 99% of requests are successful Critical < 97% of requests are successful These thresholds come from experience with customers across a variety of industries. We made the assumption that every ajax request is associated with something the user is trying to achieve and treat it accordingly. Because users will often retry failed actions, we allowed for space between warning and critical thresholds. If the ajax requests being measured are an important part of the user journey, we recommend aiming for higher success rates, such as 99.5% or 99.9%. If the ajax requests are tied to login requests, separate 4xx response codes from 5xx response codes and set a much lower threshold for the 4xx responses. You can look to historical response code rates to determine a reasonable threshold.
JavaScript error rate This KPI measures the number of JavaScript errors per page view. Goal: Remove irrelevant JavaScript errors being tracked either by tuning ingest or using filtering. Reduce JavaScript errors that impact customer performance. Thresholds: Warning: > 5% errors per page view Critical: > 10% errors per page view These thresholds come from experience with customers across a variety of industries. For each KPI, we defined thresholds - one for warning, another for critical. You might ask where these values come from or how you can be sure they should apply to your application. Our thresholds are the ones recommended by Google (as with Core Web Vitals) or by us, based on our experience across a large number of customers and applications. If you feel strongly that they should be different, you can adjust them, but you should do this at the organizational level rather than on an application by application basis. Quality Foundation helps you identify where in your application you need to make improvements that will optimize user retention, conversion and satisfaction. It is less about where things are and more about where to get to. It also shows you what you should be measuring going forward. You can use this to define service level objectives (SLOs) (in a service level dashboard) and alert on them. Prerequisites Required knowledge Familiarity with synthetic monitoring Familiarity with browser monitoring Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured for login flow Monitors should be configured to test from all regions applicable to your users Monitors should be configured for each domain and each login flow Data retention for browser events greater than or equal to 2x an average sprint Establish current state Review instrumented pages Validate Browser URL grouping Understand how you will segment your data Import the quality foundation dashboard Capture current performance for each dashboard page Review instrumented pages Review Browser apps and pages to make sure that everything you expect to report back to New Relic is doing so. You can do this by reviewing the Page Views tab in the Browser UI or running the following query: SELECT uniques(pageUrl) from PageView LIMIT MAX Copy You may need to filter out URLs that contain request or customer ID. Validate Browser URL grouping Ensure Browser segments are captured correctly so user experience performance is measurable both in the New Relic UI and at the aggregate level when querying via NRQL. A segment is the text between two / in a URL or between . of a domain name. For example, in the URL website.com/product/widget-name, the segments are: website .com product widget-name When there are a lot of URLs with a lot of segments, URLs can get crushed, so that website.com/product/widget-name becomes website.com/ or website.com/product/. In this example, the first crushed URL is not particularly useful, but the second one may be a useful way of aggregating customer experience data for the product. Not sure whether you need to tune your configuration? Import the Segment Allow List Investigation dashboard in GitHub to help. Once you’ve identified which segments to add, you can add them using Segment allow lists in Browser.
Understand how you will segment your data Make Customer Experience data understandable and actionable by breaking it out into different segments. In this case, segments refer to groups of data. It does not refer to sections of URLs, as in segment allow lists. Consider the following statements: Most of our users experience 3 seconds or better to first input delay. On average, we see 2 seconds to the largest contentful paint. Last week, there were 1 million page views. Compared to: Most of the users in the US, Canada, and EMEA experience 2 seconds or better to first input delay. Malaysia and Indonesia users experience 4 seconds; we are looking into this. Customers buying car insurance typically see 1 second to largest contentful paint. For home insurance, it’s 4 seconds. Last week, there were 700,000 page views on mobile browser apps compared to 300,000 on desktop. Let’s make sure we’re optimizing our mobile experience. Typical segmentation involves breaking down user experience into the following categories: Segment Guidance Region/Location Basic: Group by country. Browser events automatically contain the country code of requests, so there is nothing you need to do to break it out further. Advanced: Make regional grouping match regional SLO groups by creating your own region attribute using custom attributes in Browser. Facet by countryCode. Related attributes: regionCode city asnLatitude asnLongitude Device Break out performance and engagement by device type so you can understand: Typical breakdown of desktop vs mobile browser users Experience of desktop vs mobile browser users Facet by deviceType. Related attributes: userAgentName userAgentOS userAgentVersion Product/Line of Business In this scenario, a product is a separate line of business or service provided by your organization. Some examples of industries and respective products: An insurance company that sells both car and house insurance A media company that has multiple streaming services or channels A travel company that provides car rental as well as hotel bookings Basic: Break out performance by product by: Faceting on pageUrl: Use this approach when multiple products are grouped into one browser app in New Relic. Faceting by appName: Use this approach when each product is instrumented as a separate web app. Grouping by appName and then facet: Use this approach when there are multiple apps in browser supporting one product. Advanced: Add product offering as a custom attribute to browser pages using custom attributes. Environment During instrumentation or afterwards, follow a naming convention that specifies the environment in Browser. Well named browser apps specify product and/or function as well as environment. Examples: account-management.prod hotels-book.prod car-insurance.uat Using app naming conventions to specify the environment supports filtering data in both the UI and in dashboards. For more information, see the documentation for how to rename Browser apps. Team In some organizations, a single team supports multiple products, while in others, a product is big enough to be supported by multiple teams. Report on team performance against customer experience or engagement by either adding the team name to the Browser app name (for example, account-management.prod.unicorn-squad) or by using custom attributes. Import the quality foundation dashboard This step creates the dashboard that you will use to measure your customer experience and improve it. Clone the GitHub repository.
Follow the GitHub repository README instructions to implement the dashboard. Make sure to align the dashboard to lines of business or customer facing offerings rather than teams. This ensures optimization time is spent where it is most impactful. Capture current performance for each dashboard page Follow the GitHub README instructions. Use the dashboard from the previous step to understand the overall performance for each line of business. If relevant, apply filters to see performance across region or device. If values drop below targets and it matters, add it to the sheet as a candidate for improvement. Not worth tracking: A company that sells insurance in the US only notices poor performance in Malaysia. Worth tracking: A company that sells insurance in the US only notices poor performance with respect to mobile users in the US. Improvement Process Plan your work Decide which KPIs to improve Improve targeted KPIs Improve page load performance Improve AJAX response times Improve the AJAX error rate Improve JavaScript errors Plan your work Whether you have a dedicated initiative to improve performance or are classifying it as ongoing maintenance, you need to track your progress at the end of every sprint. Decide which KPIs to improve You now know what your user experience looks like across multiple lines of business. Where should you be improving? Start with business priorities. If you have clear business directives or have access to a senior manager above who does, you should focus on what matters most to your organization. For example, let’s say your company has recently launched a new initiative around a line of business but the KPIs associated with the UI are below target. This is where you should focus time initially. Next, focus on KPIs for each line of business. Finally, filter each line of business by device, region, etc., to see if additional focus is needed for specific regions or devices. Improve targeted KPIs To track your progress, create a new dashboard or add a new page to the existing dashboard and name it Quality Foundation KPI Improvement. For more information, see Improve Web Uptime. Improve page load performance Narrow your focus to specific pages that aren’t meeting target KPI values. For each page load KPI result that is out of bounds in the Quality Foundation Dashboard, remove the COMPARE WITH clause and add FACET pageUrl/targetGroupedUrl LIMIT MAX to find which pages are the poor performers. Use targetGroupedUrl when there are many results; for example, when the customer ID is part of the URL. Otherwise, use pageUrl. Original Dashboard query: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' SINCE 1 week AGO COMPARE WITH 1 week AGO Copy New query to identify problem pages: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' FACET targetGroupedUrl LIMIT MAX Copy Once you have identified pages to improve, see the guidance in Improve page load performance. Improve AJAX response times Find the slow requests. Go to the Ajax duration widget on the dashboard. View query, then open in query builder. Add facet requestUrl LIMIT MAX to the end of the query. Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - AjaxResponseTimes. Focus on improving requests with a timeToSettle > 2.5s. Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips.
Improve the AJAX error rate Find the failing requests. Go to Dashboards > Query builder. Enter FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND SINCE 1 week AGO facet pageUrl, appName Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Pages with AjaxErrors. Run the query again for the most problematic pages to find the requests that are failing: FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND pageUrl= AND appName = SINCE 1 week AGO facet requestUrl Copy Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips. Improve JavaScript errors Find the most common failures. Go to Dashboards > Query builder. Enter FROM JavaScriptError SELECT count(errorClass) SINCE 1 week AGO WHERE FACET transactionName, errorClass, errorMessage, domain Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Javascript Errors. Use this information to figure out which errors need to be addressed. Use New Relic’s recommended best practices to resolve errors that need addressing. See JavaScript errors page: Detect and analyze errors. Remove third party errors that do not add value. You may be using a third party JavaScript that is noisy but works as expected. You can take a couple of approaches: Remove the domain name from the JavaScript error/Pageview ratio widget and add it as its own widget so you can see unexpected changes. You can alert on this using Baseline NRQL alerts. Drop the JavaScript error using drop filters. Only use this option if the volume of errors is impacting your data ingest in a significant way. Be as specific as you can in the drop filter. Conclusion Best practices to adopt Revisit performance metrics (shared in this document as Quality Foundation KPIs) at the end of each sprint. Incorporate performance improvements into developer sprints. Openly share metrics with the lines of the business you support as well as other internal stakeholders. Define Customer Experience SLOs. Create alerts for business critical drops in Quality Foundation KPIs. Value Realization At the end of this process you should now: Have an understanding of your end user experience in a way that is tangible, actionable, and easy for engineers as well as the business to understand. Know how releases impact your end customers. Know how your customers are impacted by service, infrastructure, or network level events. See latency issues caused by backend services if they exist. Have created, or be on the path to create, a common language with business owners so you are working together.
This can open new avenues for recognition and sponsorship for new projects.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 162.98837, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "sections": "HTTP error rate", - "body": " Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured" - }, - "id": "61461531e7b9d25774b6f22d" - }, { "sections": [ "Handle sites with authentication", @@ -75789,7 +75660,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 162.10815, + "_score": 162.27826, "_version": null, "_explanation": null, "sort": null, @@ -75823,7 +75694,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 161.10617, + "_score": 160.54272, "_version": null, "_explanation": null, "sort": null, @@ -75834,6 +75705,69 @@ "body": " monitor API name: SCRIPT_API Provide a url and this monitor will test all the links on the page for success. If a failure is detected you can view the individual non-successful links that caused the failure. Certificate check monitor API name: SCRIPT_API Proactively ping your domain certificates based" }, "id": "603e873864441f3e154e888f" + }, + { + "sections": [ + "Quality foundation implementation guide", + "Overview", + "Desired Outcome", + "Key Performance Indicators", + "Availability", + "Largest contentful paint (LCP)", + "First input delay (FID)", + "Cumulative layout shift (CLS)", + "Time to first byte (TTFB)", + "Ajax response times", + "HTTP error rate", + "JavaScript error rate", + "Prerequisites", + "Required knowledge", + "Required Installation and Configuration", + "Establish current state", + "Review instrumented pages", + "Validate Browser URL grouping", + "Understand how you will segment your data", + "Import the quality foundation dashboard", + "Capture current performance for each dashboard page", + "Improvement Process", + "Plan your work", + "Decide which KPIs to improve", + "Improve targeted KPIs", + "Improve page load performance", + "Improve AJAX response times", + "Improve the AJAX error rate", + "Improve JavaScript errors", + "Conclusion" + ], + "title": "Quality foundation implementation guide", + "type": "docs", + "tags": [ + "Quality Foundation", + "Implementation guide", + "Digital customer experience", + "Customer experience", + "Observability maturity" + ], + "external_id": "91186ed56e33e040c73d1fff940cec0644c199f6", + "image": "https://docs.newrelic.com/static/9238160720501f4423dff703746fb59d/d9199/cx-what-you-can-measure-nr.png", + "url": "https://docs.newrelic.com/docs/new-relic-solutions/observability-maturity/customer-experience/quality-foundation-implementation-guide/", + "published_at": "2022-02-15T12:55:51Z", + "updated_at": "2022-02-15T12:55:51Z", + "document_type": "page", + "popularity": 1, + "body": "Overview Digital customer experience is your end user’s experience across all your digital touch points. There are four core factors that impact a user’s experience: Availability (Is it reachable?) Performance (Does it perform well enough to be usable?) Content quality (Does it have what users need and can they find it?) Product and content relevance (Does it have what users care about?) 
Digital customer experience includes web, mobile, and IoT. The first version of this guide is focused on measuring the end user web experience. Quality Foundation is about creating a standard practice to help you understand your digital customer experience in a meaningful way. This implementation guide will help you: Look at customer experience in relation to: Global functions, such as search and login Lines of business Regions Report back to business stakeholders on what they care about Prioritize what you work on Create a repeatable practice Desired Outcome Improve customer engagement and retention by measuring and improving performance in a way that better aligns to the end user experience. Key Performance Indicators Quality Foundation measures the following KPIs: Availability This KPI measures whether or not your application or its pages can be accessed by your users. Goal: Improve uptime and availability Thresholds: < 99% warning < 95% critical 99% or \"2 9's\" is a good minimum standard of availability, even for employee applications or sub-pages. We configure these default thresholds into the dashboards. You can easily change this to better suit expectations for your application. Largest contentful paint (LCP) Part of Core Web Vitals. Largest Contentful Paint (LCP) measures the time it takes to load the largest image or text block after a user has navigated to a new page. Goal: Reduce LCP to 2.5 seconds or better at the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 2.5 seconds Critical: > 4.0 seconds LCP thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. First input delay (FID) Part of Core Web Vitals. Measures the interactivity of a page by tracking the time between user interaction (such as clicking a link or entering text) and when the browser begins processing the event. Goal: Reduce FID to 100 milliseconds or better at the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 100 milliseconds Critical: > 300 milliseconds FID thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Cumulative layout shift (CLS) Part of Core Web Vitals. Measures how much the page layout shifts during render. Goal: Maintain a score of 0.1 or less at the 75th percentile for all pages or at least the most critical pages. Thresholds: Warning: > 0.1 score Critical: > 0.25 score CLS thresholds are defined by the team at Google. The thresholds and the supporting logic behind them can be found here. Time to first byte (TTFB) This KPI measures the time from navigation start (a user clicking a link) to the browser receiving the first byte of the response from the server. Google considers TTFB secondary to Core Web Vitals. We recommend measuring it for a more complete picture. It can be revealing if you see a change in LCP, because it answers the question as to whether the change occurred server side or client side. Goal: Reduce the time to first byte by improving CDN, network, and service performance. Thresholds: Warning > 0.5 seconds Critical > 1.0 seconds According to Google and Search Engine People, 500 milliseconds is a decent TTFB for pages with dynamic content. You can find mention of these recommendations here. Ajax response times Slow ajax calls can make the user feel as though nothing is happening or the page is broken. If the response time is slow enough, users may even abandon the journey.
Goal: Measure and improve ajax response times. Thresholds: Warning > 2 seconds Critical > 2.5 seconds These thresholds come from experience with customers across a variety of industries. HTTP error rate HTTP errors (or HTTP 4xx and 5xx responses) happen when calls to the backend are not successful. Goal: Measure and reduce the HTTP error rate to ensure your customers are able to do what they came to your site to do. Thresholds: Warning < 99% of requests are successful Critical < 97% of requests are successful These thresholds come from experience with customers across a variety of industries. We made the assumption that every ajax request is associated with something the user is trying to achieve and treat it accordingly. Because users will often retry failed actions, we allowed for space between warning and critical thresholds. If the ajax requests being measured are an important part of the user journey, we recommend aiming for higher success rates, such as 99.5% or 99.9%. If the ajax requests are tied to login requests, separate 4xx response codes from 5xx response codes and set a much lower threshold for the 4xx responses. You can look to historical response code rates to determine a reasonable threshold. JavaScript error rate This KPI measures the number of JavaScript errors per page view. Goal: Remove irrelevant JavaScript errors being tracked either by tuning ingest or using filtering. Reduce JavaScript errors that impact customer performance. Thresholds: Warning: > 5% errors per page view Critical: > 10% errors per page view These thresholds come from experience with customers across a variety of industries. For each KPI, we defined thresholds - one for warning, another for critical. You might ask where these values come from or how you can be sure they should apply to your application. Our thresholds are the ones recommended by Google (as with Core Web Vitals) or by us, based on our experience across a large number of customers and applications. If you feel strongly that they should be different, you can adjust them, but you should do this at the organizational level rather than on an application by application basis. Quality Foundation helps you identify where in your application you need to make improvements that will optimize user retention, conversion and satisfaction. It is less about where things are and more about where to get to. It also shows you what you should be measuring going forward. You can use this to define service level objectives (SLOs) (in a service level dashboard) and alert on them.
Prerequisites Required knowledge Familiarity with synthetic monitoring Familiarity with browser monitoring Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured for login flow Monitors should be configured to test from all regions applicable to your users Monitors should be configured for each domain and each login flow Data retention for browser events greater than or equal to 2x an average sprint Establish current state Review instrumented pages Validate Browser URL grouping Understand how you will segment your data Import the quality foundation dashboard Capture current performance for each dashboard page Review instrumented pages Review Browser apps and pages to make sure that everything you expect to report back to New Relic is doing so. You can do this by reviewing the Page Views tab in the Browser UI or running the following query: SELECT uniques(pageUrl) from PageView LIMIT MAX Copy You may need to filter out URLs that contain request or customer ID. Validate Browser URL grouping Ensure Browser segments are captured correctly so user experience performance is measurable both in the New Relic UI and at the aggregate level when querying via NRQL. A segment is the text between two / in a URL or between . of a domain name. For example, in the URL website.com/product/widget-name, the segments are: website .com product widget-name When there are a lot of URLs with a lot of segments, URLs can get crushed, so that website.com/product/widget-name becomes website.com/ or website.com/product/. In this example, the first crushed URL is not particularly useful, but the second one may be a useful way of aggregating customer experience data for the product. Not sure whether you need to tune your configuration? Import the Segment Allow List Investigation dashboard in GitHub to help. Once you’ve identified which segments to add, you can add them using Segment allow lists in Browser. Understand how you will segment your data Make Customer Experience data understandable and actionable by breaking it out into different segments. In this case, segments refer to groups of data. It does not refer to sections of URLs, as in segment allow lists. Consider the following statements: Most of our users experience 3 seconds or better to first input delay. On average, we see 2 seconds to the largest contentful paint. Last week, there were 1 million page views. Compared to: Most of the users in the US, Canada, and EMEA experience 2 seconds or better to first input delay. Malaysia and Indonesia users experience 4 seconds; we are looking into this. Customers buying car insurance typically see 1 second to largest contentful paint. For home insurance, it’s 4 seconds. Last week, there were 700,000 page views on mobile browser apps compared to 300,000 on desktop. Let’s make sure we’re optimizing our mobile experience. Typical segmentation involves breaking down user experience into the following categories: Segment Guidance Region/Location Basic: Group by country. Browser events automatically contain the country code of requests, so there is nothing you need to do to break it out further. Advanced: Make regional grouping match regional SLO groups by creating your own region attribute using custom attributes in Browser. Facet by countryCode.
Related attributes: regionCode city asnLatitude asnLongitude Device Break out performance and engagement by device type so you can understand: Typical breakdown of desktop vs mobile browser users Experience of desktop vs mobile browser users Facet by deviceType. Related attributes: userAgentName userAgentOS userAgentVersion Product/Line of Business In this scenario, a product is a separate line of business or service provided by your organization. Some examples of industries and respective products: An insurance company that sells both car and house insurance A media company that has multiple streaming services or channels A travel company that provides car rental as well as hotel bookings Basic: Break out performance by product by: Faceting on pageUrl: Use this approach when multiple products are grouped into one browser app in New Relic. Faceting by appName: Use this approach when each product is instrumented as a separate web app. Grouping by appName and then facet: Use this approach when there are multiple apps in browser supporting one product. Advanced: Add product offering as a custom attribute to browser pages using custom attributes. Environment During instrumentation or afterwards, follow a naming convention that specifies the environment in Browser. Well named browser apps specify product and/or function as well as environment. Examples: account-management.prod hotels-book.prod car-insurance.uat Using app naming conventions to specify the environment supports filtering data in both the UI and in dashboards. For more information, see the documentation for how to rename Browser apps. Team In some organizations, a single team supports multiple products, while in others, a product is big enough to be supported by multiple teams. Report on team performance against customer experience or engagement by either adding the team name to the Browser app name (for example, account-management.prod.unicorn-squad) or by using custom attributes. Import the quality foundation dashboard This step creates the dashboard that you will use to measure your customer experience and improve it. Clone the GitHub repository. Follow the GitHub repository README instructions to implement the dashboard. Make sure to align the dashboard to lines of business or customer facing offerings rather than teams. This ensures optimization time is spent where it is most impactful. Capture current performance for each dashboard page Follow the GitHub README instructions. Use the dashboard from the previous step to understand the overall performance for each line of business. If relevant, apply filters to see performance across region or device. If values drop below targets and it matters, add it to the sheet as a candidate for improvement. Not worth tracking: A company that sells insurance in the US only notices poor performance in Malaysia. Worth tracking: A company that sells insurance in the US only notices poor performance with respect to mobile users in the US. Improvement Process Plan your work Decide which KPIs to improve Improve targeted KPIs Improve page load performance Improve AJAX response times Improve the AJAX error rate Improve JavaScript errors Plan your work Whether you have a dedicated initiative to improve performance or are classifying it as ongoing maintenance, you need to track your progress at the end of every sprint. Decide which KPIs to improve You now know what your user experience looks like across multiple lines of business. Where should you be improving? Start with business priorities.
If you have clear business directives or have access to a senior manager above who does, you should focus on what matters most to your organization. For example, let’s say your company has recently launched a new initiative around a line of business but the KPIs associated with the UI are below target. This is where you should focus time initially. Next, focus on KPIs for each line of business. Finally, filter each line of business by device, region, etc., to see if additional focus is needed for specific regions or devices. Improve targeted KPIs To track your progress, create a new dashboard or add a new page to the existing dashboard and name it Quality Foundation KPI Improvement. For more information, see Improve Web Uptime. Improve page load performance Narrow your focus to specific pages that aren’t meeting target KPI values. For each page load KPI result that is out of bounds in the Quality Foundation Dashboard, remove the COMPARE WITH clause and add FACET pageUrl/targetGroupedUrl LIMIT MAX to find which pages are the poor performers. Use targetGroupedUrl when there are many results; for example, when the customer ID is part of the URL. Otherwise, use pageUrl. Original Dashboard query: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' SINCE 1 week AGO COMPARE WITH 1 week AGO Copy New query to identify problem pages: FROM PageViewTiming SELECT percentile(largestContentfulPaint, 75) WHERE appName ='WebPortal' AND pageUrl LIKE '%phone%' FACET targetGroupedUrl LIMIT MAX Copy Once you have identified pages to improve, see the guidance in Improve page load performance. Improve AJAX response times Find the slow requests. Go to the Ajax duration widget on the dashboard. View query, then open in query builder. Add facet requestUrl LIMIT MAX to the end of the query. Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - AjaxResponseTimes. Focus on improving requests with a timeToSettle > 2.5s. Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips. Improve the AJAX error rate Find the failing requests. Go to Dashboards > Query builder. Enter FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND SINCE 1 week AGO facet pageUrl, appName Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Pages with AjaxErrors. Run the query again for the most problematic pages to find the requests that are failing: FROM AjaxRequest SELECT percentage(count(*), WHERE httpResponseCode >= 400) WHERE httpResponseCode >= 200 AND pageUrl= AND appName = SINCE 1 week AGO facet requestUrl Copy Use New Relic’s recommended best practices to improve response times. See AJAX troubleshooting tips. Improve JavaScript errors Find the most common failures. Go to Dashboards > Query builder. Enter FROM JavaScriptError SELECT count(errorClass) SINCE 1 week AGO WHERE FACET transactionName, errorClass, errorMessage, domain Copy Run the query. View the results as a table and save to your KPI Improvement dashboard as LOB - Javascript Errors. Use this information to figure out which errors need to be addressed. Use New Relic’s recommended best practices to resolve errors that need addressing. See JavaScript errors page: Detect and analyze errors. Remove third party errors that do not add value. You may be using a third party JavaScript that is noisy but works as expected.
You can take a couple of approaches: Remove the domain name from the JavaScript error/Pageview ratio widget and add it as its own widget so you can see unexpected changes. You can alert on this using Baseline NRQL alerts. Drop the JavaScript error using drop filters. Only use this option if the volume of errors is impacting your data ingest in a significant way. Be as specific as you can in the drop filter. Conclusion Best practices to adopt Revisit performance metrics (shared in this document as Quality Foundation KPIs) at the end of each sprint. Incorporate performance improvements into developer sprints. Openly share metrics with the lines of the business you support as well as other internal stakeholders. Define Customer Experience SLOs. Create alerts for business critical drops in Quality Foundation KPIs. Value Realization At the end of this process you should now: Have an understanding of your end user experience in a way that is tangible, actionable, and easy for engineers as well as the business to understand. Know how releases impact your end customers. Know how your customers are impacted by service, infrastructure, or network level events. See latency issues caused by backend services if they exist. Have created, or be on the path to create, a common language with business owners so you are working together. This can open new avenues for recognition and sponsorship for new projects.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 152.13733, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "HTTP error rate", + "body": " Familiarity with basic Browser UI views Familiarity with SPA data in Browser UI Required Installation and Configuration Browser Pro installed in all pages SPA enabled for single page applications Synthetics monitors configured: Ping monitors configured for anonymous users Scripted synthetics check configured" + }, + "id": "61461531e7b9d25774b6f22d" } ], "/unicorn/b82fc20e-a8b6-449a-b6d7-241b4a6dfba7": [ @@ -75869,7 +75803,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75910,7 +75844,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75952,7 +75886,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -75994,7 +75928,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -76036,7 +75970,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -76084,7 +76018,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 240.86467, + "_score": 223.28, "_version": null, "_explanation": null, "sort": null, @@ -76136,7 +76070,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 226.33588, + "_score": 210.89688, "_version": null, "_explanation": null, "sort": null, @@ -76170,7 +76104,7 
@@ "external_id": "509f5fb8490b652df4c6f31ec06b403c3393530e", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elb-classic-monitoring-integration/", - "published_at": "2022-02-14T11:39:18Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-14T11:39:18Z", "document_type": "page", "popularity": 1, @@ -76178,7 +76112,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 212.75424, + "_score": 200.11398, "_version": null, "_explanation": null, "sort": null, @@ -76226,7 +76160,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 201.80823, + "_score": 189.49698, "_version": null, "_explanation": null, "sort": null, @@ -76271,7 +76205,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 200.57959, + "_score": 189.11798, "_version": null, "_explanation": null, "sort": null, @@ -76316,7 +76250,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -76357,7 +76291,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -76399,7 +76333,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -76441,7 +76375,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -76483,7 +76417,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -76530,7 +76464,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 179.99936, + "_score": 169.2631, "_version": null, "_explanation": null, "sort": null, @@ -76559,7 +76493,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 87.47721, + "_score": 83.09881, "_version": null, "_explanation": null, "sort": null, @@ -76600,7 +76534,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27649, + "_score": 81.06927, "_version": null, "_explanation": null, "sort": null, @@ -76639,7 +76573,7 @@ "body": "External Projects Highlighted Projects New Relic Projects Standards Menu External Projects Highlighted Projects New Relic Projects Standards How New Relic is Involved New Relic contributes to the OpenTelemetry project by defining specifications, implementing parts of the specifications in language SDKs, reviewing pull requests from other contributors, and serving as a maintainer in the OpenTelemetry subprojects. OpenTelemetry is part of the Cloud Native Computing Foundation (CNCF); New Relic is a Silver Member. OpenTelemetry Specification The OpenTelemetry Specification SIG works to specify requirements and expectations for all language implementations. This includes specifications for APIs, SDKs, and data format. 
New Relic serves as a pull request approver in this project and actively contributes to the specification and discussions to evolve the specification. View Repo OpenTelemetry Java SDK New Relic serves as a maintainer and major contributor to the Java SDK project. View Repo OpenTelemetry Go SDK New Relic serves as a maintainer and major contributor to the Go SDK project. View Repo OpenTelemetry .NET SDK New Relic contributes to the .NET SDK project. View Repo OpenTelemetry C++ SDK New Relic contributes to the C++ SDK project. View Repo OpenTelemetry Java Auto-Instrumentation New Relic contributes to the Java Auto-Instrumentation project. View Repo OpenTelemetry Collector New Relic contributes to the OpenTelemetry Collector project. The Collector serves as a central service to receive, process, and export OpenTelemetry data. View Repo OpenTelemetry Ruby SDK New Relic contributes to the Ruby Auto-Instrumentation project. View Repo OpenTelemetry Erlang SDK New Relic contributes to the Erlang project. View Repo View Website GitHub Involved from New Relic", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 80.84406, + "_score": 80.61123, "_version": null, "_explanation": null, "sort": null, @@ -76707,7 +76641,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 8.055802, + "_score": 8.120123, "_version": null, "_explanation": null, "sort": null, @@ -76752,7 +76686,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.80652, + "_score": 82.43481, "_version": null, "_explanation": null, "sort": null, @@ -76794,7 +76728,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.889145, + "_score": 82.04414, "_version": null, "_explanation": null, "sort": null, @@ -76806,49 +76740,6 @@ }, "id": "603ebe19e7b9d2b9342a07c9" }, - { - "sections": [ - "Azure Cosmos DB (Document DB) monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "View and query data", - "Metric data", - "Important", - "Account Data", - "DataBase Data", - "Collection Data", - "Inventory data", - "EOL NOTICE" - ], - "title": "Azure Cosmos DB (Document DB) monitoring integration", - "type": "docs", - "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" - ], - "external_id": "e4bb0ee9204d3af8c336f3bccd58052df2451116", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-cosmos-db-document-db-monitoring-integration/", - "published_at": "2022-02-15T19:28:51Z", - "updated_at": "2022-02-15T19:28:51Z", - "document_type": "page", - "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure's Cosmos DB service that reports your Cosmos DB metrics and other data to New Relic. This document explains how to activate the Cosmos DB integration and describes the data that can be captured. Features New Relic gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. Activate integration To enable this integration follow standard procedures to activate your Azure service in New Relic. 
The Cosmos DB integration requires you to create an additional role and permission to fetch database and collection data: Go to the Azure Portal and open a shell by selecting the terminal icon. Add the following command: az role definition create --role-definition '{ \"Name\": \"NewRelic Integrations\", \"Actions\": [ \"*/read\", \"Microsoft.DocumentDB/databaseAccounts/listKeys/action\" ], \"NotActions\": [], \"AssignableScopes\": [ \"/subscriptions/YOUR_INSERT_SUBSCRIPTION_ID\" ], \"Description\": \"Read Only for NewRelic Integrations\", \"IsCustom\": \"true\" }' Copy From Services > Subscriptions, select the subscription, go to Access control (IAM), and then select Add. In the Role search box, add the name of the newly created role definition (for example, NewRelic Integrations). In the Select search box, add the name of the New Relic integration application, and select it. Ensure that the application is added to the Selected members list, then Save. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Cosmos DB integration: Polling interval: 5 minutes Resolution: 1 minute or 5 minutes, varies by metric. For resolution information on a specific metric, see Microsoft Azure's documentation about support metrics. View and query data To view your integration data, go to one.newrelic.com > Infrastructure > Azure and select the Cosmos DB Integration. You can query and explore your data using the following event types: Entity Event Type Provider Account AzureCosmosDbAccountSample AzureCosmosDbAccount Database AzureCosmosDbDatabaseSample AzureCosmosDbDatabase Collection AzureCosmosDbCollectionSample AzureCosmosDbCollection For more on how to find and use data, see Understand and use integration data. Metric data Important For information on deprecated Cosmos DB events or metrics, see Azure Cosmos DB integration (deprecated). We strongly recommend migrating to the supported events and metrics in this document. To view metrics reported by the Cosmos DB integration, query the Entities below. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. Account Data Metric Description Metadata totalRequests Total number of requests. account kind region offerType statusCode resourceGroup metadataRequests Count of metadata requests. account kind region offerType statusCode resourceGroup mongoRequests Count of Mongo requests made. account kind region commandName offerType errorCode resourceGroup mongoRequestCharge Total number of Mongo request units consumed. account kind region commandName offerType errorCode resourceGroup totalRequestUnits Total number of request units consumed. account kind region offerType statusCode resourceGroup provisionedThroughput Throughput provisioned for the database or collection. account offerType kind resourceGroup availableStorageBytes Total available storage, in bytes. account kind offerType region resourceGroup dataUsageBytes Total data usage reported, in bytes. account kind offerType region resourceGroup indexUsageBytes Total index usage reported, in bytes. account kind offerType region resourceGroup documentQuotaBytes Total storage quota reported, in bytes. account kind offerType region resourceGroup documentCount Total document count reported. 
account kind offerType region resourceGroup ReplicationLatency P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account kind sourceRegion offerType targetRegion resourceGroup ServiceAvailability Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account kind errorCode offerType operationType region resourceType resourceGroup cassandraRequestCharges Total number of request units consumed for Cassandra requests. account kind errorCode offerType operationType region resourceType resourceGroup cassandraConnectionClosures Total number of Cassandra connections that were closed. account kind closureReason offerType region resourceGroup DataBase Data Metric Description Metadata totalRequests Total number of requests. account databaseName region statusCode metadataRequests Count of metadata requests. account databaseName region statusCode mongoRequests Count of Mongo requests made. account databaseName region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account databaseName region commandName errorCode totalRequestUnits Total number of request units consumed. account databaseName region statusCode provisionedThroughput Throughput provisioned for the database or collection. account databaseName availableStorageBytes Total available storage, in bytes. account databaseName region dataUsageBytes Total data usage reported, in bytes. account databaseName region indexUsageBytes Total index usage reported, in bytes. account databaseName region documentQuotaBytes Total storage quota reported, in bytes. account databaseName region documentCount Total document count reported. account databaseName region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account databaseName errorCode operationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account databaseName errorCode operationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account closureReason region
Collection Data Metric Description Metadata totalRequests Total number of requests. account collectionName database region statusCode metadataRequests Count of metadata requests. account collectionName database region statusCode mongoRequests Count of Mongo requests made. account collectionName database region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account collectionName database region commandName errorCode totalRequestUnits Total number of request units consumed. account collectionName database region statusCode provisionedThroughput Throughput provisioned for the database or collection. account collectionName database availableStorageBytes Total available storage, in bytes. account collectionName database region dataUsageBytes Total data usage reported, in bytes. account collectionName database region indexUsageBytes Total index usage reported, in bytes. account collectionName database region documentQuotaBytes Total storage quota reported, in bytes. account collectionName database region documentCount Total document count reported. account collectionName database region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account collectionName sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account collectionName database errorCode operationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account collectionName database errorCode operationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account collectionName closureReason region Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. For details on how to find and use inventory data, see Understand and use data. The Cosmos DB integration reports the inventory data for the entity type azure/cosmosdb/account/ using the following metadata: documentEndpoint: The document endpoint. databaseAccountOfferType: The database account offer type. consistencyPolicy: The consistency policy for the Cosmos DB database account. defaultConsistencyLevel: The default consistency level for the Cosmos DB database account. kind: The type of database account set at database account creation. resourceGroupName: The Azure resource group name that the Cosmos DB database account belongs to.
regionName: The region name in which the Azure DocumentDB database account is deployed. type: The Azure resource type, which is Microsoft.DocumentDB/databaseAccounts.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.60828, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "sections": "DataBase Data", - "body": " gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. Activate integration To enable this integration follow standard procedures" - }, - "id": "617dc763e7b9d2d3dac0580e" - }, { "sections": [ "Gearman", @@ -76880,7 +76771,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56332, "_version": null, "_explanation": null, "sort": null, @@ -76922,7 +76813,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50425, "_version": null, "_explanation": null, "sort": null, @@ -76932,6 +76823,48 @@ "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the transactions fail more than 10" }, "id": "61566c4c64441fc860099644" + }, + { + "sections": [ + "Pika", + "What's included", + "Python", + "Apdex Score", + "High CPU Utilization", + "Transaction Errors", + "Pika installation docs", + "What is Pika?", + "Get started!", + "More info" + ], + "title": "Pika", + "type": "quickstarts", + "tags": [ + "apm", + "python" + ], + "quick_start_name": "Pika", + "external_id": "3f7e4155b883dfe528f42a1d3d74ef2a9b249c78", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/python/pika/dashboards/python.png", + "url": "https://developer.newrelic.com/instant-observability/pika/7bdea1cd-d723-4655-ae9f-c13468d593b7/", + "published_at": "2022-02-05T01:43:08Z", + "updated_at": "2021-10-29T01:50:34Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Alert Pika installation docs Pika is a fully featured, dynamic programming language. Doc What is Pika? Pika is a fully featured, dynamic programming language. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for Pika. Pika quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Python Python Pika observability quickstart contains 3 alerts .
These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Pika observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Pika installation docs Pika is a fully featured, dynamic programming language. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 81.50405, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Python", + "tags": "apm", + "body": " automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out" + }, + "id": "61566846e7b9d2ef508de398" + } ], "/red-hat-openshift/accac9b5-ec27-4d15-8879-51220086b8a1": [ @@ -76979,7 +76912,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 321.86188, + "_score": 315.59283, "_version": null, "_explanation": null, "sort": null, @@ -76990,54 +76923,6 @@ }, "id": "61fd18e9e7b9d2b5cc5e7358" }, - { - "sections": [ - "Link your applications to Kubernetes", - "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" - ], - "title": "Link your applications to Kubernetes", - "type": "docs", - "tags": [ - "Link apps and services", - "Kubernetes integration", - "Integrations" - ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", - "document_type": "page", - "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source.
Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. 
Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled. Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation of certificates, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl x509 -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help.
Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to re-run the job using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used in order to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step.
If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries in the logs, it means that the apiserver is unable to communicate with the webhook service; this could be due to networking rules or security groups rejecting the communication. To check whether the apiserver is unable to communicate with the webhook, inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster; this will make the apiserver try to communicate with the webhook. The following command will create a busybox. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors.
grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 287.47717, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Link your applications to Kubernetes", - "sections": "Link your applications to Kubernetes", - "tags": "Kubernetes integration", - "body": " the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips" - }, - "id": "617daead28ccbc662b7ffe23" - }, { "sections": [ "Link your applications to Kubernetes", @@ -77080,7 +76965,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 279.85217, + "_score": 274.4248, "_version": null, "_explanation": null, "sort": null, @@ -77133,7 +77018,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 246.37349, + "_score": 229.0039, "_version": null, "_explanation": null, "sort": null, @@ -77147,49 +77032,89 @@ }, { "sections": [ - "Configure control plane monitoring", - "Features", - "Compatibility and requirements", - "Discovery of master nodes and control plane components", - "Configuration", - "Important", - "ETCD", - "API server", - "OpenShift configuration", - "Set up mTLS from the ETCD client CA", + "New Relic Metrics Adapter", + "BETA FEATURE", + "Requirements", + "Installation", "Tip", - "To ease future installations", - "Set up mTLS for ETCD in OpenShift", - "See your data" + "Configuration", + "How it works", + "Caution", + "Troubleshooting", + "Get verbose logs", + "Get raw metrics", + "Metrics not working" ], - "title": "Configure control plane monitoring", + "title": "New Relic Metrics Adapter", + "type": "docs", + "tags": [ + "Link apps and services", + "Kubernetes integration", + "Integrations" + ], + "external_id": "51fdc0c8df2fdc91fcc51556e323c62e7c12d48a", + "image": "", + "url": 
"https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/newrelic-metrics-adapter/", + "published_at": "2022-02-15T19:18:45Z", + "updated_at": "2022-02-04T12:17:01Z", + "document_type": "page", + "popularity": 1, + "body": "BETA FEATURE This feature is still in development, but we encourage you to try it out! You can use metrics from your New Relic account to autoscale applications and services in your Kubernetes cluster by deploying the New Relic Metrics Adapter. This adapter fetches the metric values from New Relic and makes them available for the Horizontal Pod Autoscalers. The newrelic-k8s-metrics-adapter implements the external.metrics.k8s.io API to support the use of external metrics based New Relic NRQL queries results. Once deployed, the value for each configured metric is fetched using the NerdGraph API based on the configured NRQL query. The metrics adapter exposes the metrics over a secured endpoint with TLS. New Relic metrics adapter in a cluster. Requirements Kubernetes 1.16 or higher. The New Relic Kubernetes integration. New Relic's user API key. No other External Metrics Adapter installed in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. Upgrade the installation to include the New Relic Metrics Adapter with the following command: helm upgrade --install newrelic newrelic/nri-bundle \\ --namespace newrelic --create-namespace --reuse-values \\ --set metrics-adapter.enabled=true \\ --set newrelic-k8s-metrics-adapter.personalAPIKey=YOUR_NEW_RELIC_PERSONAL_API_KEY \\ --set newrelic-k8s-metrics-adapter.config.accountID=YOUR_NEW_RELIC_ACCOUNT_ID \\ --set newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query=NRQL query Copy Please notice and adjust the following flags: metrics-adapter.enabled: Must be set to true so the metrics adapter chart is installed. newrelic-k8s-metrics-adapter.personalAPIKey: Must be set to valid New Relic Personal API key. newrelic-k8s-metrics-adapter.accountID: Must be set to valid New Relic account where metrics are going to be fetched from. newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query: Adds a new external metric where: external_metric_name: The metric name. query: The base NRQL query that is used to get the value for the metric. Tip Alternatively, you can use a values.yaml file that can be passed to the helm command with the --values flag. Values files can contain all parameters needed to configure the metrics explained in the configuration section. Configuration You can configure multiple metrics in the metrics adapter and change some parameters to modify the behaviour of the metrics cache and filtering. To see the full list and descriptions of all parameters that can be modified, refer to the chart README.md and values.yaml files. How it works The following example is a Helm values file that enable the metrics adapter on the nri-bundle chart installation, and configures the nginx_average_requests metric: metrics-adapter: enabled: true newrelic-k8s-metrics-adapter: personalAPIKey: config: accountID: externalMetrics: nginx_average_requests: query: \"FROM Metric SELECT average(nginx.server.net.requestsPerSecond) SINCE 2 MINUTES AGO\" Copy Caution The default time span for metrics is 1h. 
Therefore, you should define queries with the SINCE clause to adjust the time span according to your environment and needs. There is an HPA consuming the external metric as follows: kind: HorizontalPodAutoscaler apiVersion: autoscaling/v2beta2 metadata: name: nginx-scaler spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: nginx minReplicas: 1 maxReplicas: 10 metrics: - type: External external: metric: name: nginx_average_requests selector: matchLabels: k8s.namespaceName: nginx target: type: Value value: 10000 Copy Based on the HPA definition, the controller manager fetches the metrics from the external metrics API which are served by the New Relic metrics adapter. The New Relic metrics adapter receives the query including the nginx_average_requests metric name and all the selectors, and searches for a matching metric name in the internal memory based on the configured metrics. Then, it adds the selectors to the query to form a final query that is executed using NerdGraph to fetch the value from New Relic. The above example will generate a query like the following: FROM Metric SELECT average(nginx.server.net.requestsPerSecond) WHERE clusterName= AND `k8s.namespaceName`='nginx' SINCE 2 MINUTES AGO Copy Notice that a clusterName filter has been automatically added to the query to exclude metrics from other clusters in the same account. You can remove it by using the removeClusterFilter configuration parameter. Also the value is cached for a period of time defined by the cacheTTLSeconds configuration parameter, whose default is 30 seconds. Troubleshooting Get verbose logs Most common errors are displayed in the standard (non-verbose) logs. If you're doing a more in-depth investigation on your own or with New Relic Support, you can enable verbose mode. To get verbose logging details for an integration using Helm: Enable verbose logging: bash Copy $ helm upgrade -n newrelic --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=true newrelic/nri-bundle Leave on verbose mode for a few minutes, or until enough activity has occurred. When you have the information you need, disable verbose logging: bash Copy $ helm upgrade --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=false newrelic/nri-bundle Caution Verbose mode increases significantly the amount of information sent to log files. Enable this mode temporarily, only for troubleshooting purposes, and reset the log level when finished. Get raw metrics Sometimes it's useful to get the list of available metrics and also to get the current value of an specific metric. To get the list of metrics available, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/\" To get the value for a specific metric with a selector, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/namespaces/*/__METRIC_NAME__?labelSelector=_SELECTOR_KEY_=_SELECTOR_VALUE_\" Tip You must replace , and with your values. Metrics not working There are some usual errors that could cause a metric fail to retrieve the value. These errors are showed in the status of the metrics when you describe the HPA or are printed when you get the raw metrics directly. executing query: NRQL Syntax Error: Error at line...: The query that is being run has syntax errors. The same error message gives you the executed query and position of the error. You can try this query inside the New Relic query builder and correct the configuration from the adapter. 
extracting return value: expected first value to be of type \"float64\", got %!q(): The query doesn't return any value. The same error message gives you the executed query so you can try this query inside the New Relic query builder and correct the configuration from the adapter or the match selectors in the HPA.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 222.93944, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "Kubernetes integration", + "body": " in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. Upgrade" + }, + "id": "61fd193d196a672daae826d6" + }, + { + "sections": [ + "Kubernetes integration: Predefined alert policy", + "Predefined alert conditions", + "Container CPU usage % is too high", + "Container memory usage % is too high", + "Pod was unable to be scheduled", + "Pod is not ready", + "Container is running out of space", + "ReplicaSet doesn't have desired amount of pods", + "etcd open file descriptors", + "Create new alert conditions" + ], + "title": "Kubernetes integration: Predefined alert policy", "type": "docs", "tags": [ "Installation", "Kubernetes integration", "Integrations" ], - "external_id": "d648be240fd2f1b09bc1f29ef805bc0dd5c59e7a", - "image": "https://docs.newrelic.com/static/209f301630c770f87ea8cbb1cace8e6e/8c557/new-relic-one-k8s-cluster-explorer-control-plane-parameters.png", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/installation/configure-control-plane-monitoring/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2021-12-09T15:16:34Z", + "external_id": "7c92831c394c4c087bad8b481250e55557e4b794", + "image": "", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/kubernetes-integration-predefined-alert-policy/", + "published_at": "2022-02-15T19:17:51Z", + "updated_at": "2022-02-04T14:47:09Z", "document_type": "page", "popularity": 1, - "body": "New Relic provides Control Plane support for your Kubernetes integration, allowing you to monitor and collect metrics from your cluster's Control Plane components. That data can then be found in New Relic and used to create queries and charts. Features We monitor and collect metrics from the following control plane components: ETCD: leader information, resident memory size, number of OS threads, consensus proposals data, etc. For a list of supported metrics, see ETCD data. API server: rate of apiserver requests, breakdown of apiserver requests by HTTP method and response code, etc. For the complete list of supported metrics, see API server data. Scheduler: requested CPU/memory vs available on the node, tolerations to taints, any set affinity or anti-affinity, etc. For the complete list of supported metrics, see Scheduler data. Controller manager: resident memory size, number of OS threads created, goroutines currently existing, etc. For the complete list of supported metrics, see Controller manager data. Compatibility and requirements Control plane monitoring requires Kubernetes integration version 1.16.0 or higher. Control plane monitoring support is not enabled for managed clusters. This is because providers (EKS, GKE, AKS, etc.) 
abstract away the concept of master nodes and control plane components, so that access to them is limited or non-existent. External control planes are not supported. The unprivileged version of the Kubernetes integration does not support control plane monitoring. OpenShift 4.x uses control plane component metric endpoints that are different than the default. Discovery of master nodes and control plane components The Kubernetes integration relies on the kubeadm labeling conventions to discover the master nodes and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master=\"\" or kubernetes.io/role=\"master\". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations and values: Component Label Endpoint API server Kubeadm / Kops / ClusterAPI k8s-app=kube-apiserver tier=control-plane component=kube-apiserver OpenShift app=openshift-kube-apiserver apiserver=true localhost:443/metrics by default (can be configured) if the request fails falls back to localhost:8080/metrics ETCD Kubeadm / Kops / ClusterAPI k8s-app=etcd-manager-main tier=control-plane component=etcd OpenShift k8s-app=etcd localhost:4001/metrics Scheduler Kubeadm / Kops / ClusterAPI k8s-app=kube-scheduler tier=control-plane component=kube-scheduler OpenShift app=openshift-kube-scheduler scheduler=true localhost:10251/metrics Controller manager Kubeadm / Kops / ClusterAPI k8s-app=kube-controller-manager tier=control-plane component=kube-controller-manager​ OpenShift app=kube-controller-manager kube-controller-manager=true localhost:10252/metrics When the integration detects that it is running inside a master node, it tries to find which components are running on the node by looking for pods that match the labels listed in the table above. For every running component, the integration makes a request to its metrics endpoint. Configuration Control plane monitoring is automatic for agents running inside master nodes. The only component that requires an extra step to run is ETCD, because it uses mutual TLS authentication (mTLS) for client requests. The API Server can also be configured to be queried using the Secure Port. Important Control plane monitoring for OpenShift 4.x requires additional configuration. For more information, see the OpenShift 4.x Configuration section. ETCD In order to set mTLS for querying ETCD, there are two configuration options that need to be set: Option Value ETCD_TLS_SECRET_NAME Name of a Kubernetes secret that contains the mTLS configuration. The secret should contain the following keys: cert: the certificate that identifies the client making the request. It should be signed by an ETCD trusted CA. key: the private key used to generate the client certificate. cacert: the root CA used to identify the ETCD server certificate. If the ETCD_TLS_SECRET_NAME option is not set, ETCD metrics won't be fetched. For step by step instructions on how to create a certificate and sign it with the ETCD client CA, see Set up mTLS from the ETCD client CA. ETCD_TLS_SECRET_NAMESPACE The namespace where the secret specified in the ETCD_TLS_SECRET_NAME was created. If not set, the default namespace is used. API server By default, the API server metrics are queried using the localhost:8080 unsecured endpoint. If this port is disabled, you can also query these metrics over the secure port. 
To enable this, set the following configuration option in the Kubernetes integration manifest file: Option Value API_SERVER_ENDPOINT_URL The (secure) URL to query the metrics. The API server uses localhost:443 by default. Ensure that the ClusterRole has been updated to the newest version found in the manifest. Added in version 1.15.0. Important Note that the port can be different according to the secure port used by the API server. For example, in Minikube the API server secure port is 8443 and therefore API_SERVER_ENDPOINT_URL should be set to https://localhost:8443. OpenShift configuration Control plane components on OpenShift 4.x use endpoint URLs that require SSL and service account based authentication. Therefore, the default endpoint URLs cannot be used. To configure control plane monitoring on OpenShift, uncomment the following environment variables in the customized manifest. URL values are pre-configured to the default base URLs for the control plane monitoring metrics endpoints in OpenShift 4.x. - name: \"SCHEDULER_ENDPOINT_URL\" value: \"https://localhost:10259\" - name: \"ETCD_ENDPOINT_URL\" value: \"https://localhost:9979\" - name: \"CONTROLLER_MANAGER_ENDPOINT_URL\" value: \"https://localhost:10257\" - name: \"API_SERVER_ENDPOINT_URL\" value: \"https://localhost:6443\" Copy Important Even though the custom ETCD_ENDPOINT_URL is defined, ETCD requires HTTPS and mTLS authentication to be configured. For more on configuring mTLS for ETCD in OpenShift, see Set up mTLS for ETCD in OpenShift. Important When installing through Helm on OpenShift, specify the config to automatically include these endpoints. Setting openshift.enabled=true and openshift.version=\"4.x\" will include the secure endpoints and enable the /var/run/crio.sock runtime. Set up mTLS from the ETCD client CA The instructions below are based on the Kubernetes documentation. For more information, see Managing TLS certificates in a cluster. For OpenShift, see Set up mTLS for ETCD in OpenShift. To set up mTLS from the ETCD client CA: Download and install the tool cfssl, selecting the correct binaries for your OS from the list. Once installed, execute the following command: cat < etcd-secret.yaml Copy Open the secret file and change the keys: Rename the certificate authority to cacert. Rename the client certificate to cert. Rename the client key to key. Optional: change the secret name and namespace to something meaningful. Remove these unnecessary keys in the metadata section: creationTimestamp resourceVersion selfLink uid Install the manifest with its new name and namespace: kubectl apply -f etcd-secret.yaml Copy Go to Update manifest configuration (the last step under Set up mTLS from the ETCD client CA) to configure the required environment variables. See your data If the integration has been set up correctly, the Kubernetes cluster explorer contains all the Control Plane components and their status in a dedicated section, as shown below.
one.newrelic.com > Kubernetes Cluster Explorer: Use the Kubernetes cluster explorer to monitor and collect metrics from your cluster's Control Plane components You can also check for Control Plane data with this NRQL query: SELECT latest(timestamp) FROM K8sApiServerSample, K8sEtcdSample, K8sSchedulerSample, K8sControllerManagerSample FACET entityName where clusterName = 'MY_CLUSTER_NAME' Copy Tip If you still can't see Control Plane data, try the solution described in Kubernetes integration troubleshooting: Not seeing data.", + "body": "When deploying the New Relic Kubernetes integration for the first time in an account, we deploy a default set of alert conditions to your account. The predefined alert policy, named Kubernetes default alert policy, doesn't have a notification channel by default to avoid unwanted notifications. The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT value (cpuUsedCores/cpuLimitCores)*100 Warning threshold > 90% for at least 5 minutes Critical threshold > 95% for at least 5 mins Container memory usage % is too high Setting Value Event type K8sContainerSample SELECT value memoryWorkingSetUtilization Warning threshold > 85% for at least 5 minutes Critical threshold > 95% for at least 5 mins Pod was unable to be scheduled Setting Value Event type K8sPodSample SELECT value isScheduled Warning threshold Critical threshold isScheduled = 0 for at least 7 minutes Pod is not ready Setting Value Event type K8sPodSample SELECT value isReady Warning threshold Critical threshold isReady = 0 for at least 10 minutes Container is running out of space Setting Value Event type K8sContainerSample SELECT value fsUsedPercent Warning threshold > 75% for at least 5 minutes Critical threshold > 90% for at least 5 minutes ReplicaSet doesn't have desired amount of pods Setting Value Event type K8sReplicaSetSample SELECT value podsDesired - podsReady Warning threshold Critical threshold 0 for at least 5 minutes etcd open file descriptors Setting Value Event type K8sEtcdSample SELECT value (processOpenFds/processMaxFds)*100 Warning threshold > 75% for at least 3 minutes Critical threshold > 90% for at least 5 minutes Create new alert conditions To create new alert conditions based on Kubernetes metric data, see Understand and use data.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 242.26315, + "_score": 218.63763, "_version": null, "_explanation": null, "sort": null, "highlight": { - "sections": "Set up mTLS for ETCD in OpenShift", + "title": "Kubernetes integration: Predefined alert policy", + "sections": "Kubernetes integration: Predefined alert policy", "tags": "Kubernetes integration", - "body": ". This means that master nodes should be labeled with node-role.kubernetes.io/master="" or kubernetes.io/role="master". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations and values: Component" + "body": ". The alert conditions' thresholds can be customized to your environment and the alert policy updated to send notifications. For more information, see the Infrastructure alerts documentation. 
Predefined alert conditions Container CPU usage % is too high Setting Value Event type K8sContainerSample SELECT" }, - "id": "617d735f64441f9d39fbe7ee" + "id": "61fd3c6de7b9d279f45e6625" } ], "/split/772501cd-2b71-4544-9768-aec5efb91ef5": [ @@ -77218,7 +77143,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 75.936424, + "_score": 70.58229, "_version": null, "_explanation": null, "sort": null, @@ -77258,7 +77183,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 61.581696, + "_score": 58.156685, "_version": null, "_explanation": null, "sort": null, @@ -77297,7 +77222,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 52.602303, + "_score": 49.67939, "_version": null, "_explanation": null, "sort": null, @@ -77334,7 +77259,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 52.59035, + "_score": 49.66974, "_version": null, "_explanation": null, "sort": null, @@ -77345,42 +77270,39 @@ }, { "sections": [ - "New Relic One CLI reference", - "Installing the New Relic One CLI", - "Tip", - "New Relic One CLI Commands", - "Get started", - "Configure your CLI preferences", - "Set up your Nerdpacks", - "Manage your Nerdpack subscriptions", - "Install and manage plugins", - "Manage catalog information" + "iOS agent compatibility and requirements", + "Foreground monitoring", + "iOS requirements", + "Testing is not supported", + "Potential method replacement conflicts" ], - "title": "New Relic One CLI reference", - "type": "developer", + "title": "iOS agent compatibility and requirements", + "type": "docs", "tags": [ - "New Relic One app", - "nerdpack commands" + "Get started", + "New Relic Mobile iOS", + "Mobile monitoring" ], - "external_id": "858339a44ead21c83257778ce60b4c352cd30d3b", - "image": "https://developer.newrelic.com/static/2c6d337608b38a3312b4fc740afe6167/7272b/developercenter.png", - "url": "https://developer.newrelic.com/explore-docs/nr1-cli/", - "published_at": "2022-02-15T01:41:15Z", - "updated_at": "2022-02-11T01:45:15Z", + "external_id": "544e062fdc57c4545c2f36b54b38f95b30b3c25e", + "image": "", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-ios/get-started/new-relic-ios-compatibility-requirements/", + "published_at": "2022-02-14T12:02:28Z", + "updated_at": "2021-12-19T19:52:39Z", "document_type": "page", "popularity": 1, - "info": "An overview of the CLI to help you build, deploy, and manage New Relic apps.", - "body": "To build a New Relic One app, you must install the New Relic One CLI. The CLI helps you build, publish, and manage your New Relic app. We provide a variety of tools for building apps, including the New Relic One CLI (command line interface). This page explains how to use CLI commands to: Generate Nerdpack/Nerdlet templates Locally serve Nerdpacks (when developing) Publish and deploy Subscribe to Nerdpacks Add screenshots and metadata to the catalog Installing the New Relic One CLI In New Relic, click Instant Observability, then check the Apps box in the filter by section. Click the Build on New Relic launcher and follow the quick start instructions. The quick start automatically generates an API key for the account you select, and gives you the pre-populated commands to create a profile, generate your first \"Hello World\" app, and serve it locally. Tip Use the NR1 VS Code extension to build your apps. 
New Relic One CLI Commands This table provides descriptions for the New Relic One commands. For more context, including usage and option details, click any individual command or the command category. For details on user permissions, see Permissions. For more on how to serve and publish your application, see our guide on Deploying your New Relic One app. Get started nr1 help Shows all nr1 commands or details about each command. nr1 update Updates to the latest version of the CLI. nr1 create Creates a new component from a template (Nerdpack, Nerdlet, launcher, or catalog). nr1 profiles Manages the profiles you use to run CLI commands. nr1 autocomplete Displays autocomplete installation instructions. nr1 nrql Fetches data using NRQL (New Relic query language). Configure your CLI preferences nr1 config:set Sets a specific configuration value. nr1 config:get Shows a specific configuration. nr1 config:list Lists your configuration choices. nr1 config:delete Removes the value of a specific configuration. Set up your Nerdpacks nr1 nerdpack:build Assembles your Nerdpack into bundles. nr1 nerdpack:clone Clones an open source Nerdpack from our GitHub repository. nr1 nerdpack:serve Serves your Nerdpack for testing and development purposes. nr1 nerdpack:uuid Shows or regenerates the UUID of a Nerdpack. nr1 nerdpack:publish Publishes your Nerdpack to New Relic. nr1 nerdpack:deploy Deploys a Nerdpack version to a specific channel. nr1 nerdpack:undeploy Undeploys a Nerdpack version from a specific channel. nr1 nerdpack:clean Cleans your development folders. nr1 nerdpack:validate Validates the contents of your Nerdpack. nr1 nerdpack:info Shows the state of your Nerdpack in New Relic's registry. Manage your Nerdpack subscriptions nr1 subscription:set Subscribes your account to a Nerdpack and channel. nr1 subscription:list Lists all the Nerdpacks your account is subscribed to. nr1 subscription:unset Unsubscribes your account from a Nerdpack. Install and manage plugins nr1 plugins:install Installs a plugin into the CLI. nr1 plugins:link Links a plugin into the CLI for development. nr1 plugins:update Updates your installed plugins. nr1 plugins:uninstall Removes a plugin from the CLI. Manage catalog information nr1 catalog:info Shows the Nerdpack info stored in the catalog. nr1 catalog:submit Gathers and submits the catalog info in the current folder.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", "_score": 36.003136, "_version": null, "_explanation": null, "sort": null, "highlight": { "body": " nerdpack:serve Serves your Nerdpack for testing and development purposes. nr1 nerdpack:uuid Shows or regenerates the UUID of a Nerdpack. nr1 nerdpack:publish Publishes your Nerdpack to New Relic. nr1 nerdpack:deploy Deploys a Nerdpack version to a specific channel. nr1 nerdpack:undeploy Undeploys a Nerdpack" }, "id": "6091fa9864441feb412f36d4" }, { "sections": [ "iOS agent compatibility and requirements", "Foreground monitoring", "iOS requirements", "Testing is not supported", "Potential method replacement conflicts" ], "title": "iOS agent compatibility and requirements", "type": "docs", "tags": [ "Get started", "New Relic Mobile iOS", "Mobile monitoring" ], "external_id": "544e062fdc57c4545c2f36b54b38f95b30b3c25e", "image": "", "url": "https://docs.newrelic.com/docs/mobile-monitoring/new-relic-mobile-ios/get-started/new-relic-ios-compatibility-requirements/", "published_at": "2022-02-14T12:02:28Z", "updated_at": "2021-12-19T19:52:39Z", "document_type": "page", "popularity": 1, "body": "Before you install and configure the iOS agent, follow these guidelines for compatibility and other requirements. Foreground monitoring The iOS agent only monitors your app while it is in the foreground. The agent does not monitor background services while the app is closed. For more information, see our Mobile data privacy and security documentation. iOS requirements Make sure your iOS app meets these requirements: Component iOS application requirements Operating system iOS 9 or higher For Bitcode support, use SDK version 5.3.0 or higher. API/SDK NSURLConnection and AFNetworking are supported. NSURLSession supports upload and data tags only. ASIHttpRequest networking APIs are deprecated as of iOS agent version 5.8.2. Network traffic for UIWebView and WKWebView is supported. However, WKWebView Transfer size and Http errors are not supported. Languages Objective-C Swift: Works with both network traces and crash reporting, but no interaction traces by default. Interaction traces must be enabled for Swift. Devices Any iOS-compatible device: iPhones, iPads, etc.
File sizes The agent adds about 2 to 12 megabytes to your iOS release app, depending on platform build. Architectures ARM 64-bit. SHA-2 As a standard security measure for data collection, New Relic requires that your application server supports SHA-2 (256-bit). SHA-1 is not supported. Xcode To take advantage of New Relic's iOS features, make sure you have the latest version of Xcode. arm64e support To be able to properly symbolicate crashes from devices with arm64e architectures, make sure your Xcode settings are enabled for pointer authentication. For more information, see the Apple developer documentation. CocoaPods In order to use the latest XCFramework Agent, use CocoaPods version 1.10.1 or higher. Testing is not supported Our agents are designed and tested to work in a normal app lifecycle. New Relic does not support running any testing environment on applications with the agent. Testing can cause conflicts and unpredictable behavior. Potential method replacement conflicts Our iOS agent utilizes method replacement during run time. This may result in a conflict with other libraries that also implement method replacement, such as ReactiveCocoa, Firebase, Aspects, and AppleGuice.", + "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 36.003136, + "_score": 35.780693, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " nerdpack:serve Serves your Nerdpack for testing and development purposes. nr1 nerdpack:uuid Shows or regenerates the UUID of a Nerdpack. nr1 nerdpack:publish Publishes your Nerdpack to New Relic. nr1 nerdpack:deploy Deploys a Nerdpack version to a specific channel. nr1 nerdpack:undeploy Undeploys a Nerdpack" + "sections": "Testing is not supported", + "body": " symbolicate crashes from devices with arm64e architectures, make sure your Xcode settings are enabled for pointer authentication. For more information, see the Apple developer documentation. CocoaPods In order to use the latest XCFramework Agent, use CocoaPods version 1.10.1 or higher. 
Testing" }, - "id": "6091fa9864441feb412f36d4" + "id": "6044196064441f4f10378f04" } ], "/rack/c86fdad1-314b-4987-951f-ff4d3fd42151": [ @@ -77416,7 +77338,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77457,7 +77379,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77499,7 +77421,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77541,7 +77463,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77583,7 +77505,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77627,7 +77549,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77668,7 +77590,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77710,7 +77632,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77752,7 +77674,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77794,7 +77716,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -77828,7 +77750,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.993195, + "_score": 118.72217, "_version": null, "_explanation": null, "sort": null, @@ -77867,7 +77789,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.55557, + "_score": 82.447495, "_version": null, "_explanation": null, "sort": null, @@ -77901,7 +77823,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 63.59381, + "_score": 63.576008, "_version": null, "_explanation": null, "sort": null, @@ -77911,35 +77833,6 @@ }, "id": "6044141964441f5cb1378f32" }, - { - "image": "", - "url": "https://docs.newrelic.com/docs/release-notes/mobile-release-notes/android-release-notes/android-500/", - "sections": [ - "Android agent v5.0.0", - "Improvements" - ], - "published_at": "2022-02-15T09:55:15Z", - "title": "Android agent v5.0.0", - "updated_at": "2021-03-11T09:31:41Z", - "type": "docs", - "external_id": "33ffef8786518f9fade61dfed88996754cf2c2f0", - "document_type": "release_notes", - "popularity": 1, - "body": "Improvements Insights for Mobile This release introduces Insights for New Relic Mobile. 
The Android SDK now automatically records Session, Interaction, and Crash events in New Relic Insights. After upgrading to this release, you will be able to build queries and dashboards in Insights with the new Mobile event type. All events include attributes reflecting session and unique install identifiers, device, OS, geography, duration and other data. See the Insights documentation for a description of Mobile attributes and Insights usage details. Custom events In addition to the standard events available in Insights, you can now record custom events. Custom events have a name and set of key:value attributes, perfect for capturing usage, monetization, and custom performance data. Custom events are also available in Insights. See the Mobile Custom Event documentation for details. Custom attributes In addition to event-specific attributes, this release provides support for install-scoped attributes. An attribute recorded in the app will be attached to every Session, Interaction, Crash, and Custom event reported from that instance of the app. Custom attributes are perfect for tracking durable information like account id, campaign origin, subscription level, etc. See the Mobile Custom Attribute documentation for details. The Android API documentation includes details on sending custom events and attributes to Insights.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.149338, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Android agent v5.0.0", - "sections": "Android agent v5.0.0", - "body": "Improvements Insights for Mobile This release introduces Insights for New Relic Mobile. The Android SDK now automatically records Session, Interaction, and Crash events in New Relic Insights. After upgrading to this release, you will be able to build queries and dashboards in Insights with the new" - }, - "id": "60451a84196a67ac82960f79" - }, { "sections": [ "setUserId (Android SDK API)", @@ -77969,7 +77862,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 58.11009, + "_score": 58.242188, "_version": null, "_explanation": null, "sort": null, @@ -77980,6 +77873,35 @@ "body": " Insights events and attributes. This method can be called anytime after the New Relic Android agent starts. A user identifier is useful for several reasons. Using the Crash Analysis UI, you can: Find crashes specific to a user. Focusing on one particular crash for one user is often enough" }, "id": "603ea0e9196a67a04ba83dcf" + }, + { + "image": "", + "url": "https://docs.newrelic.com/docs/release-notes/mobile-release-notes/android-release-notes/android-500/", + "sections": [ + "Android agent v5.0.0", + "Improvements" + ], + "published_at": "2022-02-15T09:55:15Z", + "title": "Android agent v5.0.0", + "updated_at": "2021-03-11T09:31:41Z", + "type": "docs", + "external_id": "33ffef8786518f9fade61dfed88996754cf2c2f0", + "document_type": "release_notes", + "popularity": 1, + "body": "Improvements Insights for Mobile This release introduces Insights for New Relic Mobile. The Android SDK now automatically records Session, Interaction, and Crash events in New Relic Insights. After upgrading to this release, you will be able to build queries and dashboards in Insights with the new Mobile event type. All events include attributes reflecting session and unique install identifiers, device, OS, geography, duration and other data. 
See the Insights documentation for a description of Mobile attributes and Insights usage details. Custom events In addition to the standard events available in Insights, you can now record custom events. Custom events have a name and set of key:value attributes, perfect for capturing usage, monetization, and custom performance data. Custom events are also available in Insights. See the Mobile Custom Event documentation for details. Custom attributes In addition to event-specific attributes, this release provides support for install-scoped attributes. An attribute recorded in the app will be attached to every Session, Interaction, Crash, and Custom event reported from that instance of the app. Custom attributes are perfect for tracking durable information like account id, campaign origin, subscription level, etc. See the Mobile Custom Attribute documentation for details. The Android API documentation includes details on sending custom events and attributes to Insights.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 58.161392, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Android agent v5.0.0", + "sections": "Android agent v5.0.0", + "body": "Improvements Insights for Mobile This release introduces Insights for New Relic Mobile. The Android SDK now automatically records Session, Interaction, and Crash events in New Relic Insights. After upgrading to this release, you will be able to build queries and dashboards in Insights with the new" + }, + "id": "60451a84196a67ac82960f79" } ], "/pysqlite/a7dda741-365b-4d47-a1ad-538cff1ea467": [ @@ -78017,7 +77939,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.80652, + "_score": 82.43481, "_version": null, "_explanation": null, "sort": null, @@ -78059,7 +77981,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.889145, + "_score": 82.04414, "_version": null, "_explanation": null, "sort": null, @@ -78071,49 +77993,6 @@ }, "id": "603ebe19e7b9d2b9342a07c9" }, - { - "sections": [ - "Azure Cosmos DB (Document DB) monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "View and query data", - "Metric data", - "Important", - "Account Data", - "DataBase Data", - "Collection Data", - "Inventory data", - "EOL NOTICE" - ], - "title": "Azure Cosmos DB (Document DB) monitoring integration", - "type": "docs", - "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" - ], - "external_id": "e4bb0ee9204d3af8c336f3bccd58052df2451116", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-cosmos-db-document-db-monitoring-integration/", - "published_at": "2022-02-15T19:28:51Z", - "updated_at": "2022-02-15T19:28:51Z", - "document_type": "page", - "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure's Cosmos DB service that reports your Cosmos DB metrics and other data to New Relic. This document explains how to activate the Cosmos DB integration and describes the data that can be captured. Features New Relic gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. 
Activate integration To enable this integration follow standard procedures to activate your Azure service in New Relic. The Cosmos DB integration requires you to create an additional role and permission to fetch database and collection data: Go to the Azure Portal and open a shell by selecting the terminal icon. Add the following command: az role definition create --role-definition '{ \"Name\": \"NewRelic Integrations\", \"Actions\": [ \"*/read\", \"Microsoft.DocumentDB/databaseAccounts/listKeys/action\" ], \"NotActions\": [], \"AssignableScopes\": [ \"/subscriptions/YOUR_INSERT_SUBSCRIPTION_ID\" ], \"Description\": \"Read Only for NewRelic Integrations\", \"IsCustom\": \"true\" }' Copy From Services > Subscriptions, select the subscription, go to Access control (IAM), and then select Add. In the Role search box, add the name of the newly created role definition (for example, NewRelic Integrations). In the Select search box, add the name of the New Relic integration application, and select it. Ensure that the application is added to the Selected members list, then Save. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Cosmos DB integration: Polling interval: 5 minutes Resolution: 1 minute or 5 minutes, varies by metric. For resolution information on a specific metric, see Microsoft Azure's documentation about support metrics. View and query data To view your integration data, go to one.newrelic.com > Infrastructure > Azure and select the Cosmos DB Integration. You can query and explore your data using the following event types: Entity Event Type Provider Account AzureCosmosDbAccountSample AzureCosmosDbAccount Database AzureCosmosDbDatabaseSample AzureCosmosDbDatabase Collection AzureCosmosDbCollectionSample AzureCosmosDbCollection For more on how to find and use data, see Understand and use integration data. Metric data Important For information on deprecated Cosmos DB events or metrics, see Azure Cosmos DB integration (deprecated). We strongly recommend migrating to the supported events and metrics in this document. To view metrics reported by the Cosmos DB integration, query the Entities below. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. Account Data Metric Description Metadata totalRequests Total number of requests. account kind region offerType statusCode resourceGroup metadataRequests Count of metadata requests. account kind region offerType statusCode resourceGroup mongoRequests Count of Mongo requests made. account kind region commandName offerType errorCode resourceGroup mongoRequestCharge Total number of Mongo request units consumed. account kind region commandName offerType errorCode resourceGroup totalRequestUnits Total number of request units consumed. account kind region offerType statusCode resourceGroup provisionedThroughput Throughput provisioned for the database or collection. account offerType kind resourceGroup availableStorageBytes Total available storage, in bytes. account kind offerType region resourceGroup dataUsageBytes Total data usage reported, in bytes. account kind offerType region resourceGroup indexUsageBytes Total index usage reported, in bytes. account kind offerType region resourceGroup documentQuotaBytes Total storage quota reported, in bytes. account kind offerType region resourceGroup documentCount Total document count reported. 
account kind offerType region resourceGroup ReplicationLatency P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account kind sourceRegion offerType targetRegion resourceGroup ServiceAvailability Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraRequestCharges Total number of request units consumed for Cassandra requests. account kind errorCode offerType opperationType region resourceType resourceGroup cassandraConnectionClosures Total number of Cassandra connections that were closed. account kind closureReason offerType region resourceGroup DataBase Data Metric Description Metadata totalRequests Total number of requests. account databaseName region statusCode metadataRequests Count of metadata requests. account databaseName region statusCode mongoRequests Count of Mongo requests made. account databaseName region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account databaseName region commandName errorCode totalRequestUnits Total number of request units consumed. account databaseName region statusCode provisionedThroughput Throughput provisioned for the database or collection. account databaseName availableStorageBytes Total available storage, in bytes. account databaseName region dataUsageBytes Total data usage reported, in bytes. account databaseName region indexUsageBytes Total index usage reported, in bytes. account databaseName region documentQuotaBytes Total storage quota reported, in bytes. account databaseName region documentCount Total document count reported. account databaseName region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account databaseName errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account databaseName errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account closureReason region DataBase Data Metric Description Metadata totalRequests Total number of requests. account databaseName region statusCode metadataRequests Count of metadata requests. account databaseName region statusCode mongoRequests Count of Mongo requests made. account databaseName region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account databaseName region commandName errorCode totalRequestUnits Total number of request units consumed. account databaseName region statusCode provisionedThroughput Throughput provisioned for the database or collection. account databaseName availableStorageBytes Total available storage, in bytes. account databaseName region dataUsageBytes Total data usage reported, in bytes. account databaseName region indexUsageBytes Total index usage reported, in bytes. account databaseName region documentQuotaBytes Total storage quota reported, in bytes. account databaseName region documentCount Total document count reported. 
account databaseName region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account databaseName errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account databaseName errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account closureReason region Collection Data Metric Description Metadata totalRequests Total number of requests. account collectionName database region statusCode metadataRequests Count of metadata requests. account collectionName database region statusCode mongoRequests Count of Mongo requests made. account collectionName database region commandName errorCode mongoRequestCharge Total number of Mongo request units consumed. account collectionName database region commandName errorCode totalRequestUnits Total number of request units consumed. account collectionName database region statusCode provisionedThroughput Throughput provisioned for the database or collection. account collectionName database availableStorageBytes Total available storage, in bytes. account collectionName database region dataUsageBytes Total data usage reported, in bytes. account collectionName database region indexUsageBytes Total index usage reported, in bytes. account collectionName database region documentQuotaBytes Total storage quota reported, in bytes. account collectionName database region documentCount Total document count reported. account collectionName database region replicationLatencyMilliseconds P99 replication latency across source and target regions for geo-enabled account, in milliseconds. account collectionName sourceRegion targetRegion serviceAvailabilityPercent Account requests availability percentage in hour, day, or month granularity. No specific metadata. cassandraRequests Count of Cassandra requests made. account collectionName database errorCode opperationType region resourceType cassandraRequestCharges Total number of request units consumed for Cassandra requests. account collectionName database errorCode opperationType region resourceType cassandraConnectionClosures Total number of Cassandra connections that were closed. account collectionName closureReason region Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. Inventory data is information about your system's state and configuration. For details on how to find and use inventory data, see Understand and use data. The Cosmos DB integration reports the inventory data for the entity type azure/cosmosdb/account/ using the following metadata: documentEndpoint: The document end point. databaseAccountOfferType: The database account offer type. consistencyPolicy: The consistency policy for the Cosmos DB database account. defaultConsistencyLevel: The default consistency level for the Cosmos DB database account. kind: The type of database account set at database account creation. resourceGroupName: The Azure resource group name that the Cosmos DB database account belong to. 
regionName: The region name in which the Azure DocumentDB database account is deployed. type: The azure resource type, which is Microsoft.DocumentDB/databaseAccounts.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.60828, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "sections": "DataBase Data", - "body": " gathers both database data and collection billing data from your Azure Cosmos DB service. You can monitor and alert on your Azure Cosmos DB data from New Relic, and you can create custom queries and custom chart dashboards. Activate integration To enable this integration follow standard procedures" - }, - "id": "617dc763e7b9d2d3dac0580e" - }, { "sections": [ "Gearman", @@ -78145,7 +78024,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56332, "_version": null, "_explanation": null, "sort": null, @@ -78187,7 +78066,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50425, "_version": null, "_explanation": null, "sort": null, @@ -78197,6 +78076,48 @@ "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the the transactions fail more than 10" }, "id": "61566c4c64441fc860099644" + }, + { + "sections": [ + "Pika", + "What's included", + "Python", + "Apdex Score", + "High CPU Utilization", + "Transaction Errors", + "Pika installation docs", + "What is Pika?", + "Get started!", + "More info" + ], + "title": "Pika", + "type": "quickstarts", + "tags": [ + "apm", + "python" + ], + "quick_start_name": "Pika", + "external_id": "3f7e4155b883dfe528f42a1d3d74ef2a9b249c78", + "image": "https://raw.githubusercontent.com/newrelic/newrelic-quickstarts/v0.96.0/quickstarts/python/pika/dashboards/python.png", + "url": "https://developer.newrelic.com/instant-observability/pika/7bdea1cd-d723-4655-ae9f-c13468d593b7/", + "published_at": "2022-02-05T01:43:08Z", + "updated_at": "2021-10-29T01:50:34Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Python Dashboard Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes Alert High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. Alert Transaction Errors This alert is triggered when the the transactions fail more than 10% of the time in 5 minutes. Alert Pika installation docs Pika is a fully featured, dynamic programming language. Doc What is Pika? Pika is a fully featured, dynamic programming language. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for Pika. Pika quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Python Python Pika observability quickstart contains 3 alerts . 
These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Pika observability quickstart contains 1 data source . This is how you'll get your data into New Relic. Pika installation docs Pika is a fully featured, dynamic programming language. Docs", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", "_score": 81.50405, "_version": null, "_explanation": null, "sort": null, "highlight": { "sections": "Python", "tags": "apm", "body": " automatically instruments Pika with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up errors inbox , transaction tracing , and service maps . More info Check out" }, "id": "61566846e7b9d2ef508de398" } ], "/express/da8f3197-7766-4096-92db-32edc8a8a36e": [ @@ -78225,7 +78146,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 319.28305, + "_score": 296.58286, "_version": null, "_explanation": null, "sort": null, @@ -78330,7 +78251,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 314.08, + "_score": 292.2414, "_version": null, "_explanation": null, "sort": null, @@ -78359,7 +78280,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.55774, + "_score": 269.04272, "_version": null, "_explanation": null, "sort": null, @@ -78404,7 +78325,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 269.03418, + "_score": 253.98067, "_version": null, "_explanation": null, "sort": null, @@ -78449,7 +78370,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 262.44315, + "_score": 246.80772, "_version": null, "_explanation": null, "sort": null, @@ -78491,7 +78412,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -78533,7 +78454,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -78575,7 +78496,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -78617,7 +78538,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -78659,7 +78580,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -78705,7 +78626,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b",
"_type": "520d1d5d14cc8a32e600034c", - "_score": 49.545578, + "_score": 48.534103, "_version": null, "_explanation": null, "sort": null, @@ -78737,7 +78658,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 40.142944, + "_score": 40.02409, "_version": null, "_explanation": null, "sort": null, @@ -78782,7 +78703,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 33.13157, + "_score": 32.82866, "_version": null, "_explanation": null, "sort": null, @@ -78826,7 +78747,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -78868,7 +78789,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -78910,7 +78831,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -78952,7 +78873,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -78994,7 +78915,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -79038,7 +78959,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 113.5916, + "_score": 113.52176, "_version": null, "_explanation": null, "sort": null, @@ -79078,7 +78999,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 91.370926, + "_score": 91.331665, "_version": null, "_explanation": null, "sort": null, @@ -79090,41 +79011,6 @@ }, "id": "617e649c196a67df22f7bb1c" }, - { - "sections": [ - "Troubleshoot enabling serverless monitoring of AWS Lambda", - "Problem", - "Solution", - "Recommended: Attach your CloudWatch logs to the ticket", - "Important" - ], - "title": "Troubleshoot enabling serverless monitoring of AWS Lambda", - "type": "docs", - "tags": [ - "Troubleshooting", - "AWS Lambda monitoring", - "Serverless function monitoring" - ], - "external_id": "73f864add78be5efb2429485506dc5a679a9820e", - "image": "", - "url": "https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/troubleshooting/troubleshoot-enabling-serverless-monitoring-aws-lambda/", - "published_at": "2022-02-15T17:56:10Z", - "updated_at": "2022-02-15T17:56:09Z", - "document_type": "troubleshooting_doc", - "popularity": 1, - "body": "Problem You're attempting to enable serverless monitoring for AWS Lambda and are having an issue or error. Solution There are two common problems related to not completing all of the enablement procedures: Not seeing data on CloudWatch metrics UI page. This means the AWS integration step wasn't completed. Not seeing data on Troubleshooting category UI pages. If you aren't seeing data on the Distributed tracing, Errors, and Invocations UI tabs, this means the APM agent instrumentation step wasn't completed. 
Besides these basic enablement problems, there are some additional problems that may cause an issue: CloudWatch error \"HTTP error 401: unauthorized.\" This is due to an incorrect API Key. The --nr-api-keyargument in the Configure AWS enable step takes your user key, which is different from the REST API key. Custom metrics are missing. Lambda monitoring is not compatible with our custom metrics. Use custom attributes to add metadata. Invocations missing. To see invocation breakdown details, distributed tracing must be enabled as part of the Lambda instrumentation step. Distributed tracing is required so that span details can be displayed in the invocation details pane. You've completed the installation, integration, and instrumentation steps correctly, and your function is sending logs to CloudWatch but you're not seeing traces for specific dependencies (or any traces) in the UI. This may result from the order of layer merging (if you're using our Lambda layers) or from the order of import (if you're instrumenting manually): If you're instrumenting with layers: make sure in your function configuration that the New Relic layer is merged before other layers (though if your function uses webpack, the New Relic layer should be merged after the webpack layer). If you're instrumenting a Node function manually, make sure that logging is enabled, and that your function imports newrelic before it imports any dependencies you expect to monitor. If none of these solutions help you, contact our support team. The following information will help you when you talk to support technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? Recommended: Attach your CloudWatch logs to the ticket To provide our support team with logging information when opening a ticket: Invoke the function in AWS Lambda. Click on the logs link after your function runs. This will take you to the CloudWatch logs in AWS. On the left-hand sidebar in AWS, under Logs, click on Insights. Select your function and also the newrelic-log-ingestion stream. Apply an appropriate Time Filter, and a log entry limit (the default of 20 may not be enough). Under Actions select Copy query results (ASCII). Paste the copied text into a new text file, then save and upload the text file to the ticket. Important The NR_LAMBDA_MONITORING payload contains all the information the agent attempts to send up, including metrics, events, some AWS account metadata, invocations and errors data. Note that some of that data (for example, our legacy metrics) will not make it to our UI because our ingest pipeline does not consume them.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.002975, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? 
Recommended: Attach your CloudWatch" - }, - "id": "603ea6bb64441f85284e889b" - }, { "sections": [ "Configure request queue reporting", @@ -79154,7 +79040,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.37561, + "_score": 88.6792, "_version": null, "_explanation": null, "sort": null, @@ -79215,7 +79101,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.41236, + "_score": 84.79026, "_version": null, "_explanation": null, "sort": null, @@ -79225,6 +79111,41 @@ "body": "New Relic has APM language agents for C, Go, Java, Node.js, .NET, PHP, Python, and Ruby. Each of these offers several ways to leverage the power of distributed tracing: Quick start for standard distributed tracing (recommended): A fast way to get started Infinite Tracing: An advanced alternative" }, "id": "6072a66564441fb28e9d8595" + }, + { + "sections": [ + "Troubleshoot enabling serverless monitoring of AWS Lambda", + "Problem", + "Solution", + "Recommended: Attach your CloudWatch logs to the ticket", + "Important" + ], + "title": "Troubleshoot enabling serverless monitoring of AWS Lambda", + "type": "docs", + "tags": [ + "Troubleshooting", + "AWS Lambda monitoring", + "Serverless function monitoring" + ], + "external_id": "73f864add78be5efb2429485506dc5a679a9820e", + "image": "", + "url": "https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/troubleshooting/troubleshoot-enabling-serverless-monitoring-aws-lambda/", + "published_at": "2022-02-15T17:56:10Z", + "updated_at": "2022-02-15T17:56:09Z", + "document_type": "troubleshooting_doc", + "popularity": 1, + "body": "Problem You're attempting to enable serverless monitoring for AWS Lambda and are having an issue or error. Solution There are two common problems related to not completing all of the enablement procedures: Not seeing data on CloudWatch metrics UI page. This means the AWS integration step wasn't completed. Not seeing data on Troubleshooting category UI pages. If you aren't seeing data on the Distributed tracing, Errors, and Invocations UI tabs, this means the APM agent instrumentation step wasn't completed. Besides these basic enablement problems, there are some additional problems that may cause an issue: CloudWatch error \"HTTP error 401: unauthorized.\" This is due to an incorrect API Key. The --nr-api-keyargument in the Configure AWS enable step takes your user key, which is different from the REST API key. Custom metrics are missing. Lambda monitoring is not compatible with our custom metrics. Use custom attributes to add metadata. Invocations missing. To see invocation breakdown details, distributed tracing must be enabled as part of the Lambda instrumentation step. Distributed tracing is required so that span details can be displayed in the invocation details pane. You've completed the installation, integration, and instrumentation steps correctly, and your function is sending logs to CloudWatch but you're not seeing traces for specific dependencies (or any traces) in the UI. This may result from the order of layer merging (if you're using our Lambda layers) or from the order of import (if you're instrumenting manually): If you're instrumenting with layers: make sure in your function configuration that the New Relic layer is merged before other layers (though if your function uses webpack, the New Relic layer should be merged after the webpack layer). 
If you're instrumenting a Node function manually, make sure that logging is enabled, and that your function imports newrelic before it imports any dependencies you expect to monitor. If none of these solutions help you, contact our support team. The following information will help you when you talk to support technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? Recommended: Attach your CloudWatch logs to the ticket To provide our support team with logging information when opening a ticket: Invoke the function in AWS Lambda. Click on the logs link after your function runs. This will take you to the CloudWatch logs in AWS. On the left-hand sidebar in AWS, under Logs, click on Insights. Select your function and also the newrelic-log-ingestion stream. Apply an appropriate Time Filter, and a log entry limit (the default of 20 may not be enough). Under Actions select Copy query results (ASCII). Paste the copied text into a new text file, then save and upload the text file to the ticket. Important The NR_LAMBDA_MONITORING payload contains all the information the agent attempts to send up, including metrics, events, some AWS account metadata, invocations and errors data. Note that some of that data (for example, our legacy metrics) will not make it to our UI because our ingest pipeline does not consume them.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 82.84296, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " technicians: Has the Lambda function appeared in the UI before? If so, what is the name of the function? If some data for the Lambda function is appearing in the UI, what specific data is appearing? What APM language agent are you using to instrument the function? 
Recommended: Attach your CloudWatch" + }, + "id": "603ea6bb64441f85284e889b" } ], "/sunspot/bddc7786-fe78-484c-a263-98cdf5d51f2f": [ @@ -79260,7 +79181,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79301,7 +79222,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79343,7 +79264,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79385,7 +79306,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79427,7 +79348,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79464,7 +79385,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 123.92131, + "_score": 123.2737, "_version": null, "_explanation": null, "sort": null, @@ -79508,7 +79429,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -79550,7 +79471,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -79592,7 +79513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -79634,7 +79555,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -79676,7 +79597,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -79721,7 +79642,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79762,7 +79683,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79804,7 +79725,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79846,7 +79767,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79888,7 +79809,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -79928,7 +79849,7 @@ "info": "", "_index": 
"520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 386.04968, + "_score": 364.8006, "_version": null, "_explanation": null, "sort": null, @@ -79985,7 +79906,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 344.86313, + "_score": 326.58368, "_version": null, "_explanation": null, "sort": null, @@ -80026,7 +79947,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 312.7329, + "_score": 293.0594, "_version": null, "_explanation": null, "sort": null, @@ -80063,7 +79984,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 291.5088, + "_score": 275.1955, "_version": null, "_explanation": null, "sort": null, @@ -80077,51 +79998,47 @@ }, { "sections": [ - "Notification message templates", - "Message template variables", - "The variables menu", - "Use the Handlebars syntax", - "Helper functions", - "JSON", - "Equality", - "Replace", - "Usage examples", - "Validate data", - "Return JSON", - "Get values from an array", - "Iterate through an array", - "Handle missing attributes" + "Incident Intelligence destination examples", + "EOL NOTICE", + "Destination template examples", + "Suggested OpsGenie template example", + "Suggested ServiceNow template example", + "Suggested Slack template example", + "Suggested VictorOps template example", + "Webhook and JSON format examples", + "Webhook Names and Descriptions", + "Jinja2 Default Payload", + "Jinja2 Useful Syntax" ], - "title": "Notification message templates", + "title": "Incident Intelligence destination examples", "type": "docs", "tags": [ - "Notification templates", - "Incident Intelligence", + "Incident intelligence", "Applied intelligence", "Alerts and Applied Intelligence" ], - "external_id": "17c0cb0905ad9d7fad7c31c814704279312f55b5", - "image": "https://docs.newrelic.com/static/7b1203c718220cb4a25d8d52dbbbbe98/c1b63/notification-payload-template.png", - "url": "https://docs.newrelic.com/docs/alerts-applied-intelligence/notifications/message-templates/", - "published_at": "2022-02-14T04:51:44Z", - "updated_at": "2022-02-14T04:51:44Z", + "external_id": "a516d04f8b75541a6dc9338fa3ce9645ba87d620", + "image": "", + "url": "https://docs.newrelic.com/docs/alerts-applied-intelligence/applied-intelligence/incident-intelligence/incident-intelligence-destination-examples/", + "published_at": "2022-02-14T06:49:59Z", + "updated_at": "2021-11-06T20:07:35Z", "document_type": "page", "popularity": 1, - "body": "Notification message templates enable you to customize your notification event data before it's sent to your third-party destination. The templates map your custom values to the values used by your third-party destination. This gives you full control over what data will be sent and where, as well as being able to fully engage with the services you use. Message template variables A message template is what you use to convert New Relic event data to data that's consumable by your third-party service. Variables are specific attributes that are mapped to data fields in your third-party service. Message templates are written in a simple templating language called Handlebars. Variables in the message templates are written as expressions inside double curly braces {{ }}. Use the notification message template to map your New Relic notifications to the fields in your external services. The variables menu The New Relic variable names are listed in the message template variables menu. 
The variables are grouped into subcategories. In the variables menu, type {{ to select from a list of variables. As you type, variable names appear via autocomplete. The variable type is written on the right-hand side. You can add enriched data to these variables. The variables menu shows the options you have when mapping New Relic notification fields onto the fields in your external service. Use the Handlebars syntax When an event generates a notification, the message template uses the Handlebar variables to map the notification data to the fields used by your third-party service. The Handlebars language provides many features in addition to basic variable replacement, including evaluating nested input objects and functions, such as iterations (loops), conditional statements, and more. In Handlebars, these functions are called helpers. Helper functions Our message templates support the Handlebars built-in helpers. In addition, we've added other helpers that might be useful to you. JSON The {{json}} helper converts text to a JSON element. Use this when you're configuring a Webhook’s payload, which uses a JSON syntax, and any other situation you might want to pass JSON formatted data. For example, with a variable called data. { \"data\": { \"tags\": [\"infra, team-a\"] } } Copy To get the names array as a JSON element, use the {{json}} helper: {{json data.tags}} Copy to get: [\"infra\", \"team-a\"] Copy Equality Use the equality {{#eq}} helper to compare variables. Compares variables a and b, renders 'yes' or 'no': {{#eq a b}} yes {{else}} no {{/eq}} Compares string value \"a\" to variable b, renders 'yes' or 'no': {{#eq \"a\" b}} yes {{else}} no {{/eq}} Renders 'true' or 'false': {{eq a b}} Render 'y' or 'n': {{eq a b yes='y' no='n'}} Copy Replace The replace helper replaces instances of the first parameter in the second parameter with the child block. Use else clause to specify what happens when no instance of the first parameter is found. If it is omitted an empty string will be generated. Example #1: replace the word dog with cat in the sentence The dog likes to eat: {{#replace \"dog\" \"The dog likes to eat\"}}cat{{/replace}} Copy to get: The cat likes to eat Copy Example #2: replace the word cat with mouse in the sentence The dog likes to eat: {{#replace \"cat\" \"The dog likes to eat\"}}mouse{{/replace}} Copy to get an empty string: Copy Example #3: replace the word cat with mouse in the sentence The dog likes to eat, using the else clause: {{#replace \"cat\" \"The dog likes to eat\"}}mouse{{else}}There is no cat to replace{{/replace}} Copy to get: There is no cat to replace Copy Example #4: replace the word dog with cat in the sentence The DOG likes to eat while ignoring case: {{#replace \"/dog/i\" \"The DOG likes to eat\"}}cat{{/replace}} Copy to get: The cat likes to eat Copy Example #5: replace the variable {{needle}} with the variable {{replacement}} in the variable {{haystack}}: {{#replace needle haystack }}{{replacement}}{{/replace}} Copy using this data: { \"needle\": \"/dog/i\", \"haystack\": \"The DOG likes to eat\", \"replacement\": \"cat\" } Copy to get: The cat likes to eat Copy Usage examples The examples are based on a variable called data: \"data\": { \"tags\":[\"infra, team-a\"], \"id\":123456789, \"name\": \"Alice\", } Copy The data value has an equivalent, dot-notated format: \"data.tags\": [\"infra, team-a\"] \"data.id\": 123456789 \"data.name\": \"Alice\" Copy Validate data If id equals 123456789, then the output is valid. If not, the output is not valid. 
{{eq data.name \"Alice\" yes='valid' no='not valid'}} Copy If name equals Alice, then the output is valid. Return JSON Get the tags and object’s properties in a JSON form: {{json data.tags}} Copy This would return the following JSON: [\"infra\", \"team-a\"] Copy Get values from an array Get the first tag from the tags array: {{json data.tags.[0]}} Copy This would return the first value from the array: bash Copy $ \"infra\" Iterate through an array Iterate a variable of type array and aggregate the values into a string: {{#each tags}}{{this}}{{#unless @last}}, {{/unless}}{{/each}} Copy The result contains the tags, seperated by commas (the trailing comma is omitted): bash Copy $ infra, team Similarly, iterate the data variable, aggregate the object’s values, and output a JSON element: {{#each (json data)}}{{this}}{{/each}} Copy This would return a JSON such as: { \"tags\": [\"infra, team-a\"], \"name\": \"Alice\", \"id\": \"123456789\" } Copy Iterate the data variable, then aggregate the object’s entries to a string: {{#each data}}{{@key}}: {{this}}{{#unless @last}}, {{/unless}}{{/each}} Copy This would return a string such as: bash Copy $ tags: infra,team-a, name: Alice, id: 123456789 Handle missing attributes In some cases, an attribute may be missing from the variables menu, or not exist whatsoever. We can use the #if statement to set a fallback, such as: {{#if data.type}} {{ json data.type }} {{else}}\"N/A\"{{/if}} Copy would return the string \"N/A\".", + "body": "Destinations are the data outputs to Applied Intelligence, where you can view your automatically correlated incidents. You can configure Incident Intelligence destinations to send data to PagerDuty or webhooks. This document gives examples of destination templates, webhook formats, and JSON formats. EOL NOTICE As of October 2021, we've discontinued support for several capabilities with PagerDuty, including suggested responders, golden signals, and component enrichment. For more details, including how you can easily make this transition, see our Explorers Hub post. Destination template examples The following template example destinations are formatted in Jinja2. 
Suggested OpsGenie template example { \"alias\": {{ id }}, \"message\": {{ ui_name }}, \"source\": '{{ sources }}', \"priority\": {{ priority }}, \"details\": { \"self_url\": {{ url }}, \"state\": {{ state }}, \"is_correlated\": {{ is_correlated }}, \"created_on\": {{ created_on }}, \"modified_on\": {{ modified_on }}, \"activated_on\": {{ active_since }}, \"closed_on\": {% if closed_on is defined %} {{ closed_on }} {% else %} None {% endif %}, \"is_correlated\": {{ is_correlated }} }, \"description\": \"\"\"Incidents [ {% for incident in incidents %} { \"id\": {{ incident.id }}, \"events_count\": {{ incident.events_count }}, \"labels\": {{ incident.labels }}, \"title\": {{ incident.title }}, \"description\": {{ incident.description }}, \"state\": {{ incident.state }}, \"sources\": {{ incident.sources }}, \"modified_on\": {{ incident.modified_on }}, \"opened_on\": {{ incident.opened_on }}, \"closed_on\": {{ incident.closed_on }} } {% if not loop.last %},{% endif %}{% endfor %} ]\"\"\" } Copy Suggested ServiceNow template example { \"short_description\": {{ ui_name }}, \"description\": 'Issue Id: {{ id }}, \\n Description: {{ description }}, \\n Sources: {{ sources }}, \\n Priority: {{ priority }}, \\n Details: { \\n self_url: {{ url }}, \\n state: {{ state }}, \\n is_correlated: {{ is_correlated }}, \\n created_on: {{ created_on }}, \\n modified_on: {{ modified_on }}, \\n activated_on: {{ active_since }}, \\n closed_on: {% if closed_on is defined %} {{ closed_on }} {% else %} None {% endif %}, \\n is_correlated: {{is_correlated}} }, \\n\\n incidents: {{ incidents }}, \\n\\n pathways: {{ pathways }}' } Copy Suggested Slack template example Go to Slack incoming WebHooks Choose the right Slack workspace and click \"Add to Slack\" Select the destination channel to receive the notifications Click “Add Incoming WebHooks Integration” Copy the WebHook URL In the next screen, click “Save settings” at the bottom. In New Relic Applied Intelligence, under Incident Intelligence click Destinations Add a WebHook In the endpoint, paste the WebHook URL from Slack. 
In the custom payload, paste the following JSON: { \"blocks\": [ { \"type\": \"section\", \"text\": { \"type\": \"mrkdwn\", \"text\": \"*New Relic Incident Intelligence Alert*\" } }, { \"type\": \"divider\" }, { \"type\": \"section\", \"text\": { \"type\": \"mrkdwn\", \"text\": \"*CUSTOM FIELDS*:\" }, \"fields\": [ { \"type\": \"mrkdwn\", \"text\": \"*Issue ID*\" }, { \"type\": \"mrkdwn\", \"text\": \"*Issue Title*\" }, { \"type\": \"plain_text\", \"text\": {{ id }} }, { \"type\": \"plain_text\", \"text\": {{ ui_name }} } ] }, { \"type\": \"section\", \"fields\": [ { \"type\": \"mrkdwn\", \"text\": \"*Issue URL*\" }, { \"type\": \"mrkdwn\", \"text\": \"*Description*\" }, { \"type\": \"mrkdwn\", \"text\": {{ url }} }, { \"type\": \"plain_text\", \"text\": {{ description }} } ] }, { \"type\": \"section\", \"fields\": [ { \"type\": \"mrkdwn\", \"text\": \"*State*\" }, { \"type\": \"mrkdwn\", \"text\": \"*is_correlated*\" }, { \"type\": \"plain_text\", \"text\": {{ state }} }, { \"type\": \"plain_text\", \"text\": \"{{ is_correlated }}\" } ] }, { \"type\": \"section\", \"fields\": [ { \"type\": \"mrkdwn\", \"text\": \"*Created On*\" }, { \"type\": \"mrkdwn\", \"text\": \"*Modified On*\" }, { \"type\": \"plain_text\", \"text\": \"{{ created_on }}\" }, { \"type\": \"plain_text\", \"text\": \"{{ modified_on }}\" } ] }, { \"type\": \"section\", \"fields\": [ { \"type\": \"mrkdwn\", \"text\": \"*Activated On*\" }, { \"type\": \"mrkdwn\", \"text\": \"*Closed On*\" }, { \"type\": \"plain_text\", \"text\": \"{{ active_since }}\" }, { \"type\": \"plain_text\", \"text\": \"{% if closed_on is defined %} {{ closed_on }} {% else %} None {% endif %}\" } ] }, { \"type\": \"section\", \"fields\": [ { \"type\": \"mrkdwn\", \"text\": \"*Aggregated Incidents*\" }, { \"type\": \"mrkdwn\", \"text\": \"*Monitoring Tool*\" }, { \"type\": \"plain_text\", \"text\": \"{{ incident_count }}\" }, { \"type\": \"plain_text\", \"text\": {{ sources|join(', ') }} } ] } ] } Copy Suggested VictorOps template example { \"monitoring_tool\": {{ sources }}, {% if state == 'closed' %} \"message_type\": \"OK\", {% else %} \"message_type\": {{ priority }}, {% endif %} \"custom_fields\": { \"issue_url\": {{ url }}, \"description\": {{ description }}, \"state\": {{ state }}, \"is_correlated\": {{ is_correlated }}, \"created_on\": {{ created_on }}, \"modified_on\": {{ modified_on }}, \"activated_on\": {{ active_since }}, \"closed_on\": {% if closed_on is defined %} {{ closed_on }} {% else %} None {% endif %}, \"related_incidents\": [ {% for incident in incidents %} { \"id\": {{ incident.id }}, \"events_count\": {{ incident.events_count }}, \"labels\": {{ incident.labels }}, \"title\": {{ incident.title }}, \"description\": {{ incident.description }}, \"state\": {{ incident.state }}, \"sources\": {{ incident.sources }}, \"modified_on\": {{ incident.modified_on }}, \"opened_on\": {{ incident.opened_on }}, \"closed_on\": {{ incident.closed_on }} } {% if not loop.last %},{% endif %} {% endfor %} ] }, \"state_message\": {{ description }}, \"entity_id\": {{ id }}, \"entity_display_name\": {{ ui_name }}, \"vo_annotate.u.NRAI_Link\": {{ url }} } Copy Webhook and JSON format examples Applied Intelligence will send the event body in JSON format via HTTPS POST. The system expects the endpoint to return a successful HTTP code (2xx). If you use webhooks to configure Incident Intelligence destinations, use these examples of the webhook body and JSON format. 
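Since Applied Intelligence only needs the endpoint to answer with a 2xx code, a test receiver can be very small. Here is a minimal sketch using only the Python standard library; the port is arbitrary, and a real destination would sit behind HTTPS and validate the payload.

```python
import json
from http.server import BaseHTTPRequestHandler, HTTPServer

class WebhookHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        issue = json.loads(self.rfile.read(length))  # fields described in the table below
        print(issue.get("id"), issue.get("state"), issue.get("priority"))
        self.send_response(200)  # any 2xx acknowledges delivery
        self.end_headers()

HTTPServer(("", 8080), WebhookHandler).serve_forever()
```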
Webhook Names and Descriptions Webhook format: Name Description url Href A link to the UI where the issue can be seen. id String Unique identifier for the issue. title String The issue title. description String The description of the issue. priority Enum The issue priority. Can be Critical, High, Medium, or Low. state Enum The issue status. Can be Active, Closed, or Acknowledged. is_correlated Boolean True if the issue is based on correlated data. created_on String The date and time the issue was created (in ISO format). modified_on String The date and time the issue was modified (in ISO format). active_since String The date and time the issue was activated (in ISO format). closed_on String The date and time the issue was closed (in ISO format). sources List(String) List of the different sources that were used to send the events into Applied Intelligence (for example: PagerDuty). pathways List(Pathways) List of pathways that are associated with the issue. Each pathway contains an id and name: [{\"id\": \"pathway id\", \"name\": \"pathway name\"}]. pathways[].id String The pathway ID. pathways[].name String The pathway name. incidents List(Incident) List of incidents that are attached to the issue. The list contains only the latest 100 incidents. incidents[].id String The incident ID. incidents[].events_count Integer The number of events used to create the incident. incidents[].title String The incident title. incidents[].description String The incident description. incidents[].labels Dictionary (String) A string to string mapping of the incident labels. Labels represent the unique entities that are used to describe the incident. incidents[].priority Enum The incident priority. Can be Critical, High, Medium, or Low. incidents[].sources List(String) The incident source. incidents[].state Enum(open, closed) The incident state. incidents[].opened_on String The date and time the incident was opened (in ISO format). incidents[].closed_on String The date and time the incident was closed (in ISO format). ui_name String Issue title. accumulations['alert/signal'] String Issue analysis summary golden signal(s) (if applicable). accumulations['alert/components'] String Issue analysis summary golden components (if applicable). Jinja2 Default Payload Applied Intelligence uses a templating framework called Jinja2 in the Webhook interface. Here is a default Jinja2 payload to use: { \"id\": {{ id }}, \"url\": {{ url }}, \"ui_name\": {{ ui_name }}, \"description\": {{ description }}, \"priority\": {{ priority }}, \"state\": {{ state }}, \"is_correlated\": {{ is_correlated }}, \"created_on\": {{ created_on }}, \"modified_on\": {{ modified_on }}, \"active_since\": {{ active_since }}, \"closed_on\": {% if closed_on is defined %} {{ closed_on }} {% else %} None {% endif %}, \"sources\": {{ sources }}, \"incidents\": {{ incidents }}, \"pathways\": {{ pathways }} } Copy Jinja2 Useful Syntax Below are a few useful Jinja2 commands to help you format your output.
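Before looking at the individual commands below, it can help to have a quick way to try a snippet locally. This is a minimal sketch, assuming the Python jinja2 package; the real rendering happens inside the Webhook interface.

```python
from jinja2 import Environment  # pip install jinja2

# The closed_on clause from the default payload above, rendered without a
# closed_on variable so the {% else %} branch is exercised.
snippet = '"closed_on": {% if closed_on is defined %} {{ closed_on }} {% else %} None {% endif %}'
print(Environment().from_string(snippet).render())  # prints roughly: "closed_on": None
```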
Casting a value to integer Example: \"severity\": {{ priority | int }} Copy If clause to check if an attribute’s value is set Example: \"golden_signals\": {% if accumulations['alert/signal'] is defined %} {{ accumulations['alert/signal'] }} {% else %} None {% endif %} Copy For loop to iterate over an array of values: Example: \"description\": 'Incidents [ {% for incident in incidents %} { \"id\": {{ incident.id }}, \"events_count\": {{ incident.events_count }}, \"labels\": {{ incident.labels }}, \"title\": {{ incident.title }}, \"description\": {{ incident.description }}, \"state\": {{ incident.state }}, \"sources\": {{ incident.sources }}, \"modified_on\": {{ incident.modified_on }}, \"opened_on\": {{ incident.opened_on }}, \"closed_on\": {{ incident.closed_on }} } {% if not loop.last %},{% endif %}{% endfor %} ]' Copy Check if an array attribute's value is set: Example: \"hostname_field\": {% if incidents[0].labels['newrelic/tag/hostname'] is defined %} {{ incidents[0].labels['newrelic/tag/hostname'] }} {% else %} None {% endif %} Copy", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 169.98407, + "_score": 166.97075, "_version": null, "_explanation": null, "sort": null, "highlight": { - "title": "Notification message templates", - "sections": "Notification message templates", - "tags": "Incident Intelligence", - "body": ". Message templates are written in a simple templating language called Handlebars. Variables in the message templates are written as expressions inside double curly braces {{ }}. Use the notification message template to map your New Relic notifications to the fields in your external services" + "title": "Incident Intelligence destination examples", + "sections": "Incident Intelligence destination examples", + "tags": "Incident intelligence", + "body": " the notifications. Click “Add Incoming WebHooks Integration” Copy the webhook URL In the next screen, click “Save settings” at the bottom. In New Relic Applied Intelligence, under Incident Intelligence click Destinations Add a webhook In the endpoint field, paste the webhook URL from Slack.
In the custom" }, - "id": "618f3a6c28ccbc60e70317f1" + "id": "6044280d64441f4af5378ed3" } ], "/falcon/70c647fd-114f-4b64-9e54-21f7fd1ed652": [ @@ -80156,7 +80073,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -80198,7 +80115,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -80240,7 +80157,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -80282,7 +80199,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -80324,7 +80241,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -80368,7 +80285,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -80410,7 +80327,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -80452,7 +80369,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -80494,7 +80411,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -80536,7 +80453,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -80573,7 +80490,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 1259.9567, + "_score": 1262.2787, "_version": null, "_explanation": null, "sort": null, @@ -80592,7 +80509,7 @@ "Tip", "Important" ], - "published_at": "2022-02-15T01:41:58Z", + "published_at": "2022-02-16T01:44:29Z", "title": "Instrument your cluster", "updated_at": "2021-11-06T01:49:38Z", "type": "developer", @@ -80603,7 +80520,7 @@ "body": "lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. Each procedure in the lab builds upon the last, so make sure you've completed the last procedure, Explore your cluster, before starting this one. As the developer of TinyHat.me, you need to have visibility into your cluster. You need to know how healthy your application is. You need to know when things go wrong. But you've put it off for so long because instrumenting Kubernetes is hard and time-consuming. This is one of the things that makes Pixie so valuable. 
Pixie is a CNCF open source Kubernetes monitoring solution that provides: Automatic and instant baseline observability of your cluster Actionable, code-level insights of your applications With Pixie's auto-telemetry, you'll instrument your cluster in minutes to get dynamic data such as protocol traces, resource metrics, and app metrics from your cluster—all without an agent! Tip If you haven't signed into New Relic, do that now so you're ready to install Pixie into your cluster. Step 1 of 10 Open New Relic. On the right side of the upper navigation bar, click Add more data: Step 2 of 10 Click Guided install: This walks you through the installation process. Step 3 of 10 Click Kubernetes to let New Relic guide you through instrumenting your Kubernetes cluster: Step 4 of 10 Click Begin installation: Step 5 of 10 Select your account, name your cluster \"tiny-hat\", and click Continue: This specifies that TinyHat.me, and all its services, should live in a New Relic cluster called \"tiny-hat\" in the account you selected. Step 6 of 10 Leave the default choices on the next screen. These provide a range of observability features for your cluster, including our infrastructure agent which gives you a high level overview of the health of your cluster. Notably, the default options include \"Instant service-level insights, full-body requests, and application profiles through Pixie\" which you focus on in this lab. Click Continue: In the next screen, you see a command for installing our Kubernetes integration into your cluster. Step 7 of 10 Click Copy command: Now you're ready to install Pixie into your cluster. Step 8 of 10 Switch back to your terminal and paste the Helm command. Step 9 of 10 While you're installing Pixie, switch back to New Relic and click Continue to progress your guided installation to the final step. Here, you see a message that says \"Listening for data\": In a few minutes, Helm will have fully installed the necessary charts. You should see a message with the name, deployed date, namespace, and more: bash Copy NAME: newrelic-bundle LAST DEPLOYED: Thu Sep 23 13:50:24 2021 NAMESPACE: newrelic STATUS: deployed REVISION: 1 TEST SUITE: None Soon after that, the New Relic page updates to tell you that we're receiving data from your cluster: Step 10 of 10 Click Kubernetes cluster explorer to see your nodes, pods, deployments and a host of other data about your cluster, all in one view: Important You may see an error message, \"We're receiving incomplete data for this cluster.\" Please wait a few more minutes and refresh the page to see your cluster. In minutes, you were able to instrument your entire cluster without having to install language-specific agents or specify detailed cluster information! On top of all the data you see in the cluster explorer, click a pod or a node to dig deeper and see the granular data that the infrastructure agent was able to access near-instantly: lab This procedure is part of a lab that teaches you how to monitor your Kubernetes cluster with Pixie. 
Now that you've instrumented your cluster, use Pixie to debug your application.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.33723, + "_score": 57.40007, "_version": null, "_explanation": null, "sort": null, @@ -80707,7 +80624,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 352.4525, + "_score": 327.95792, "_version": null, "_explanation": null, "sort": null, @@ -80719,6 +80636,32 @@ }, "id": "617ea3da196a67da9ef7d092" }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 297.94754, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": " automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" + }, + "id": "61d8b6a664441fbe9700cc16" + }, { "sections": [ "Generate trace log for troubleshooting (Node.js)", @@ -80744,7 +80687,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 319.28305, + "_score": 296.58286, "_version": null, "_explanation": null, "sort": null, @@ -80756,32 +80699,6 @@ }, "id": "617ea562e7b9d2fd44c05078" }, - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 315.10687, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " automatically, while other tools require you to insert some code in the services. 
Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile" - }, - "id": "61d8b6a664441fbe9700cc16" - }, { "image": "", "url": "https://docs.newrelic.com/docs/browser/new-relic-browser/browser-pro-features/upload-source-maps-api/", @@ -80818,7 +80735,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 308.55157, + "_score": 291.27484, "_version": null, "_explanation": null, "sort": null, @@ -80864,7 +80781,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 296.3191, + "_score": 278.66797, "_version": null, "_explanation": null, "sort": null, @@ -80906,7 +80823,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99103, + "_score": 95.21301, "_version": null, "_explanation": null, "sort": null, @@ -80948,7 +80865,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -80990,7 +80907,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99097, + "_score": 95.21295, "_version": null, "_explanation": null, "sort": null, @@ -81032,7 +80949,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.99081, + "_score": 95.2128, "_version": null, "_explanation": null, "sort": null, @@ -81071,7 +80988,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 93.49031, + "_score": 93.73055, "_version": null, "_explanation": null, "sort": null, @@ -81115,7 +81032,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -81157,7 +81074,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -81199,7 +81116,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -81241,7 +81158,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -81283,7 +81200,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -81315,7 +81232,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.10287, + "_score": 114.09729, "_version": null, "_explanation": null, "sort": null, @@ -81345,7 +81262,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.44131, + "_score": 110.495575, "_version": null, "_explanation": null, "sort": null, @@ -81375,7 +81292,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.57589, + "_score": 109.63725, "_version": null, "_explanation": null, "sort": null, @@ -81407,7 
+81324,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.12697, + "_score": 109.20071, "_version": null, "_explanation": null, "sort": null, @@ -81446,7 +81363,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.356445, + "_score": 105.35089, "_version": null, "_explanation": null, "sort": null, @@ -81492,7 +81409,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81533,7 +81450,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81575,7 +81492,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81617,7 +81534,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81659,7 +81576,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81703,7 +81620,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81744,7 +81661,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81786,7 +81703,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81828,7 +81745,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81870,7 +81787,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81914,7 +81831,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81955,7 +81872,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -81997,7 +81914,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -82039,7 +81956,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -82081,7 +81998,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": 
null, @@ -82119,7 +82036,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 222.20781, + "_score": 207.2623, "_version": null, "_explanation": null, "sort": null, @@ -82175,7 +82092,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.8711, + "_score": 165.793, "_version": null, "_explanation": null, "sort": null, @@ -82239,7 +82156,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 146.40012, + "_score": 136.64267, "_version": null, "_explanation": null, "sort": null, @@ -82281,7 +82198,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 131.33131, + "_score": 131.42546, "_version": null, "_explanation": null, "sort": null, @@ -82326,7 +82243,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 130.79378, + "_score": 129.85677, "_version": null, "_explanation": null, "sort": null, @@ -82372,7 +82289,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -82413,7 +82330,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -82455,7 +82372,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -82497,7 +82414,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -82539,7 +82456,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -82588,7 +82505,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 165.1406, + "_score": 153.37717, "_version": null, "_explanation": null, "sort": null, @@ -82628,7 +82545,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 124.64363, + "_score": 115.50088, "_version": null, "_explanation": null, "sort": null, @@ -82669,7 +82586,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 121.88742, + "_score": 113.527466, "_version": null, "_explanation": null, "sort": null, @@ -82728,7 +82645,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.16919, + "_score": 111.11895, "_version": null, "_explanation": null, "sort": null, @@ -82759,7 +82676,7 @@ "external_id": "4e3af1ef7b8a79842f689fde5561e79fa9acfbb0", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elemental-mediaconvert-monitoring-integration/", - "published_at": "2022-02-14T11:37:19Z", + "published_at": "2022-02-16T01:43:46Z", "updated_at": "2022-02-14T11:37:19Z", "document_type": "page", "popularity": 1, @@ -82767,7 +82684,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 117.08328, + "_score": 110.06327, "_version": null, "_explanation": 
null, "sort": null, @@ -82810,7 +82727,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -82852,7 +82769,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -82894,7 +82811,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -82936,7 +82853,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -82978,7 +82895,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -83023,7 +82940,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -83064,7 +82981,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -83106,7 +83023,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -83148,7 +83065,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -83190,7 +83107,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -83228,7 +83145,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 222.20781, + "_score": 207.2623, "_version": null, "_explanation": null, "sort": null, @@ -83284,7 +83201,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.8711, + "_score": 165.793, "_version": null, "_explanation": null, "sort": null, @@ -83348,7 +83265,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 146.40012, + "_score": 136.64267, "_version": null, "_explanation": null, "sort": null, @@ -83390,7 +83307,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 131.33131, + "_score": 131.42546, "_version": null, "_explanation": null, "sort": null, @@ -83435,7 +83352,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 130.79378, + "_score": 129.85677, "_version": null, "_explanation": null, "sort": null, @@ -83476,7 +83393,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 272.22168, + "_score": 270.99435, "_version": null, "_explanation": null, "sort": null, @@ -83507,7 +83424,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 267.75958, + "_score": 266.72873, "_version": null, 
"_explanation": null, "sort": null, @@ -83544,7 +83461,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 257.93774, + "_score": 256.80435, "_version": null, "_explanation": null, "sort": null, @@ -83575,7 +83492,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 253.53484, + "_score": 252.77283, "_version": null, "_explanation": null, "sort": null, @@ -83605,7 +83522,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 250.09953, + "_score": 249.39142, "_version": null, "_explanation": null, "sort": null, @@ -83648,7 +83565,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -83690,7 +83607,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -83732,7 +83649,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -83774,7 +83691,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -83816,7 +83733,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -83878,7 +83795,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 45.127655, + "_score": 42.564754, "_version": null, "_explanation": null, "sort": null, @@ -83920,7 +83837,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 20.2427, + "_score": 20.085806, "_version": null, "_explanation": null, "sort": null, @@ -83951,7 +83868,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 12.334793, + "_score": 12.316227, "_version": null, "_explanation": null, "sort": null, @@ -83994,7 +83911,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 6.7901697, + "_score": 6.7833753, "_version": null, "_explanation": null, "sort": null, @@ -84037,7 +83954,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -84079,7 +83996,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -84121,7 +84038,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -84163,7 +84080,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -84205,7 +84122,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, 
"_version": null, "_explanation": null, "sort": null, @@ -84249,7 +84166,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -84291,7 +84208,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 81.50674, "_version": null, "_explanation": null, "sort": null, @@ -84333,7 +84250,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -84375,7 +84292,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -84417,7 +84334,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -84455,7 +84372,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 111.41844, + "_score": 111.148636, "_version": null, "_explanation": null, "sort": null, @@ -84493,7 +84410,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.50282, + "_score": 90.8239, "_version": null, "_explanation": null, "sort": null, @@ -84527,7 +84444,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.453156, + "_score": 78.32762, "_version": null, "_explanation": null, "sort": null, @@ -84573,7 +84490,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.225204, + "_score": 78.15056, "_version": null, "_explanation": null, "sort": null, @@ -84614,7 +84531,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.22137, + "_score": 78.14758, "_version": null, "_explanation": null, "sort": null, @@ -84661,7 +84578,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 49.549957, + "_score": 48.569668, "_version": null, "_explanation": null, "sort": null, @@ -84693,7 +84610,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 40.134586, + "_score": 40.040745, "_version": null, "_explanation": null, "sort": null, @@ -84738,7 +84655,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 33.120075, + "_score": 32.837097, "_version": null, "_explanation": null, "sort": null, @@ -84787,7 +84704,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 63.543415, + "_score": 63.653313, "_version": null, "_explanation": null, "sort": null, @@ -84807,7 +84724,7 @@ "lab", "Technical Detail" ], - "published_at": "2022-02-15T01:38:15Z", + "published_at": "2022-02-16T01:44:29Z", "title": "Set up your lab environment", "updated_at": "2021-12-22T01:43:56Z", "type": "developer", @@ -84818,7 +84735,7 @@ "body": "lab This procedure is part of a lab that teaches you how to instrument your application with OpenTelemetry. If you haven't already, check out the lab introduction. Before you can walk through the lab proper, you need to set up your development environment. 
Here, you: Spin up your .NET application Send traffic to your app with a simple load generator Step 1 of 3 Clone the lab repository: bash Copy $ git clone https://github.com/newrelic-experimental/opentelemetry-dotnet-lab-materials Step 2 of 3 Restore dependencies, build, and run the application: bash Copy $ cd opentelemetry-dotnet-lab-materials $ dotnet restore app $ dotnet build app $ dotnet run --project app In the output, you see a URL for your app: bash Copy Determining projects to restore... All projects are up-to-date for restore. Microsoft (R) Build Engine version 17.0.0+c9eb9dd64 for .NET Copyright (C) Microsoft Corporation. All rights reserved. Determining projects to restore... All projects are up-to-date for restore. opentelemetry-dotnet-lab-materials -> /workspace/opentelemetry-dotnet-lab-materials/app/bin/Debug/net6.0/opentelemetry-dotnet-lab-materials.dll Build succeeded. 0 Warning(s) 0 Error(s) Time Elapsed 00:00:01.42 Building... info: Microsoft.Hosting.Lifetime[14] Now listening on: https://localhost:7072 info: Microsoft.Hosting.Lifetime[0] Application started. Press Ctrl+C to shut down. info: Microsoft.Hosting.Lifetime[0] Hosting environment: Development info: Microsoft.Hosting.Lifetime[0] Content root path: /workspace/opentelemetry-dotnet-lab-materials/app/ The application has a single endpoint at /WeatherForecast, which you can visit in your browser or with curl: bash Copy $ curl -k https://localhost:7072/WeatherForecast [{\"date\":\"2021-11-18T16:03:02.655159-05:00\",\"temperatureC\":38,\"temperatureF\":100,\"summary\":\"Chilly\"},{\"date\":\"2021-11-19T16:03:02.655161-05:00\",\"temperatureC\":-3,\"temperatureF\":27,\"summary\":\"Mild\"},{\"date\":\"2021-11-20T16:03:02.655162-05:00\",\"temperatureC\":-8,\"temperatureF\":18,\"summary\":\"Hot\"},{\"date\":\"2021-11-21T16:03:02.655162-05:00\",\"temperatureC\":3,\"temperatureF\":37,\"summary\":\"Cool\"},{\"date\":\"2021-11-22T16:03:02.655162-05:00\",\"temperatureC\":10,\"temperatureF\":49,\"summary\":\"Warm\"}] Technical Detail You use the -k option to instruct curl not to verify the site's SSL certificate. This is fine because you're making a request against localhost. Leave this open so you can simulate requests against it. Step 3 of 3 In another terminal window, run the load generator: bash Copy $ cd opentelemetry-dotnet-lab-materials/sim $ pip install requests $ python simulator.py Now that you've got your application and load generator running, it's time to see what OpenTelemetry's all about. lab This procedure is part of a lab that teaches you how to instrument your application with OpenTelemetry.
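The lab ships its own simulator.py, whose exact contents aren't shown here. As a rough idea of what such a load generator can look like, this hypothetical stand-in loops requests against the endpoint above; verify=False mirrors curl's -k flag for the local development certificate.

```python
import time

import requests  # pip install requests

while True:
    # verify=False mirrors curl's -k flag; requests will warn about the
    # unverified HTTPS connection, which is expected against localhost.
    response = requests.get("https://localhost:7072/WeatherForecast", verify=False)
    print(response.status_code, len(response.json()), "forecasts")
    time.sleep(1)
```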
Now that you've set up your environment, instrument your application.", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39842, + "_score": 59.210583, "_version": null, "_explanation": null, "sort": null, @@ -84852,7 +84769,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.856033, + "_score": 54.793175, "_version": null, "_explanation": null, "sort": null, @@ -84863,6 +84780,39 @@ }, "id": "604526d164441f6a40378eeb" }, + { + "sections": [ + "Azure Cloud Services: No data appears", + "Problem", + "Solution" + ], + "title": "Azure Cloud Services: No data appears", + "type": "docs", + "tags": [ + "Azure troubleshooting", + "NET agent", + "Agents" + ], + "external_id": "552beae09bbb95650dd5f9cb1a8a36777f403aba", + "image": "", + "url": "https://docs.newrelic.com/docs/apm/agents/net-agent/azure-troubleshooting/azure-cloud-services-no-data-appears/", + "published_at": "2022-02-15T20:05:45Z", + "updated_at": "2021-10-23T17:44:19Z", + "document_type": "page", + "popularity": 1, + "body": "Problem You installed New Relic's .NET agent through Microsoft Azure Cloud Services, and then you installed the NuGet package. After generating some traffic and waiting a few minutes for data to start being collected for your app, data still does not appear in the APM user interface. Solution In order for the .NET agent to start up and attach to your app, at least one metric must be collected. If you have an external call or database call that New Relic instruments, then your app will report data to your APM Summary page. In addition, information about external calls will appear on your app's Service maps. Without custom instrumentation, Worker roles only report database calls and external calls. (Instrumenting a Worker Role is similar to instrumenting a custom application.) To view other details, you must gather custom metrics using New Relic's .NET API and view them in a custom dashboard.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 54.21169, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "body": "Problem You installed New Relic's .NET agent through Microsoft Azure Cloud Services, and then you installed the NuGet package. After generating some traffic and waiting a few minutes for data to start being collected for your app, data still does not appear in the APM user interface. Solution" + }, + "id": "617e5a0528ccbc4354800476" + }, { "sections": [ "Networks", @@ -84899,7 +84849,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.664303, + "_score": 53.588497, "_version": null, "_explanation": null, "sort": null, @@ -84907,39 +84857,6 @@ "body": " that New Relic connects to that can be blocked without affecting your usage of the product. It also doesn't cover Nerdpacks or other features that communicate with external services that have additional domain requirements. 
If your organization uses a firewall that restricts outbound traffic" }, "id": "603eb81364441f64a24e88b6" - }, - { - "sections": [ - "Azure Cloud Services: No data appears", - "Problem", - "Solution" - ], - "title": "Azure Cloud Services: No data appears", - "type": "docs", - "tags": [ - "Azure troubleshooting", - "NET agent", - "Agents" - ], - "external_id": "552beae09bbb95650dd5f9cb1a8a36777f403aba", - "image": "", - "url": "https://docs.newrelic.com/docs/apm/agents/net-agent/azure-troubleshooting/azure-cloud-services-no-data-appears/", - "published_at": "2022-02-15T20:05:45Z", - "updated_at": "2021-10-23T17:44:19Z", - "document_type": "page", - "popularity": 1, - "body": "Problem You installed New Relic's .NET agent through Microsoft Azure Cloud Services, and then you installed the NuGet package. After generating some traffic and waiting a few minutes for data to start being collected for your app, data still does not appear in the APM user interface. Solution In order for the .NET agent to start up and attach to your app, at least one metric must be collected. If you have an external call or database call that New Relic instruments, then your app will report data to your APM Summary page. In addition, information about external calls will appear on your app's Service maps. Without custom instrumentation, Worker roles only report database calls and external calls. (Instrumenting a Worker Role is similar to instrumenting a custom application.) To view other details, you must gather custom metrics using New Relic's .NET API and view them in a custom dashboard.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.28348, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": "Problem You installed New Relic's .NET agent through Microsoft Azure Cloud Services, and then you installed the NuGet package. After generating some traffic and waiting a few minutes for data to start being collected for your app, data still does not appear in the APM user interface. Solution" - }, - "id": "617e5a0528ccbc4354800476" } ], "/zipkin/51fe9abd-ecab-4d2a-aecf-97fbecf01c28": [ @@ -84976,7 +84893,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 116.43321, + "_score": 110.17081, "_version": null, "_explanation": null, "sort": null, @@ -84988,117 +84905,6 @@ }, "id": "6072a6a3e7b9d23abba5c682" }, - { - "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", - "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", - "sections": [ - "Distributed tracing setup options" - ], - "published_at": "2022-02-14T03:23:44Z", - "title": "Distributed tracing setup options", - "updated_at": "2022-02-14T03:23:44Z", - "type": "docs", - "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", - "document_type": "page", - "popularity": 1, - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! 
To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.21034, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Distributed tracing setup options", - "sections": "Distributed tracing setup options", - "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started" - }, - "id": "61d8b6a664441fbe9700cc16" - }, - { - "image": "", - "url": "https://docs.newrelic.com/docs/browser/new-relic-browser/browser-pro-features/browser-data-distributed-tracing/", - "sections": [ - "Browser data in distributed tracing", - "Benefits of browser data in distributed tracing", - "Requirements and compatibility", - "Enable distributed tracing", - "Enable Infinite Tracing", - "Enable cross-origin resource sharing (CORS)", - "Risks and mitigations", - "Caution", - "Important", - "Enable cross-origin resource sharing", - "Find data", - "Troubleshooting" - ], - "published_at": "2022-02-14T09:10:27Z", - "title": "Browser data in distributed tracing", - "updated_at": "2022-02-14T09:10:27Z", - "type": "docs", - "external_id": "fe29e64d44406955651c3644eabb3ddb6d2a66c0", - "document_type": "page", - "popularity": 1, - "body": "If you use browser to monitor end-user browser activity, you can now see end-user-originating browser-side traces in distributed tracing. This document contains: Benefits of this feature Requirements Enable distributed tracing Enable cross-origin resource sharing (CORS) Find and query data Troubleshooting Benefits of browser data in distributed tracing By enabling New Relic to report browser data to distributed tracing, you can see the connection between front-end activity and back-end activity. You can see across a full transaction, from time spent by an end user in the web browser, to network activity, to associated back-end services. Benefits of this feature: Quickly spot latencies, errors, and anomalies in the browser or network Resolve customer-facing problems more quickly All the benefits of distributed tracing applied to your end-user monitoring Requirements and compatibility This feature reports AJAX requests (Fetch and XHR) that occur during a browser interaction. 
By default, only single-origin requests are monitored unless cross-origin resource sharing is enabled. Make sure you have the necessary minimum versions for your browser and APM agents: Browser Pro+SPA agent with distributed tracing enabled: Browser agent version 1153 or higher Browser agent version 1158 or higher (required for cross-origin resource sharing) Browser agent version 1173 or higher (required for w3c trace context support) APM agent versions: C SDK 1.3 or higher Java 5.9.0 or higher PHP 9.4.0 or higher Other APM agent version requirements Enable distributed tracing To enable distributed tracing for browser monitoring: Make sure you meet requirements. Go to one.newrelic.com, and click Browser, then select an app, then on the left side, click Application settings. Ensure the Distributed tracing toggle is on. By default, for agent version 1173 and above, the newrelic, traceparent, and tracestate headers will be added to all same-origin AJAX requests. Optional: If all of your services are configured to use the w3c trace context headers, you can choose to exclude the newrelic, traceparent, and tracestate headers from requests. Optional: Enable cross-origin resource sharing. Redeploy the browser monitoring agent (either restarting the associated APM agent or updating the copy/paste browser installation). If you have some apps or services that are downstream from your browser app that use the advanced option Infinite Tracing, see these setup steps. Enable Infinite Tracing If your browser apps have some downstream services that use the advanced type of distributed tracing called Infinite Tracing, you should enable this feature for your browser apps. This ensures that your root span (the initiating browser app) is included in the rest of the spans tracked by Infinite Tracing. To set up Infinite Tracing: Complete the steps above to enable distributed tracing. Go to our Infinite Tracing documentation for steps to create a trace observer and to select which apps (data sources) should send trace spans to the Infinite Tracing trace observer. Enable cross-origin resource sharing (CORS) If you have AJAX requests that need resources from different origins, you can enable cross-origin resource sharing (CORS). By default, distributed tracing for cross-origin requests is not enabled because of browser CORS security restrictions: Distributed tracing is implemented by adding custom HTTP headers (newrelic, traceparent, and tracestate) to outgoing AJAX requests, and browsers typically do not allow custom headers on cross-origin requests. With the release of agent version 1173, we now support the w3c trace context headers (traceparent and tracestate) so these should also be allowed in your configuration. There are two separate configurations required to enable cross-origin distributed tracing: Configure the service on the different origin to accept the newrelic custom header Configure browser monitoring to include the target origin in distributed tracing Our step-by-step instructions provide key concepts and steps to enable this feature, but if you need more background about how cross-origin resource sharing works, we recommend this Mozilla developer document. Risks and mitigations Caution Cross-origin resource sharing can expose you to a high level of risk if the services on the different origins are not configured correctly.
The AJAX requests will likely return an error, resulting in a variety of failures, including: Resources failing to load (for example, images and key content) Login failures Entire site outages (depending on type of requests enabled) By enabling this cross-origin resource sharing feature, you are acknowledging the following: You understand that this feature is optional and not mandatory. You understand the steps you need to take in order to enable this feature for your services and your domains. You understand that if you do not properly configure your services prior to deployment (including but not limited to configuring your services on your domains to accept custom headers) portions or all of your website will likely malfunction. You understand that New Relic is neither responsible nor liable for errors or issues related to your misconfiguration of servers or services. You fully and solely accept the risks and wish to proceed. The best way to minimize your risk is to ensure you fully understand the process and to try it first in a test environment. Before reading the step-by-step instructions, it may help to first read this overview of the process: To use distributed tracing with cross-origin resources, you populate a list of approved cross-origin resources in New Relic, and then we automatically send the following custom headers to those resources: newrelic, traceparent, and tracestate. For this process to work, you must first ensure that someone has configured the services on the other origins to accept this custom header. Cross-origin resource sharing uses a variety of HTTP headers (both in the request and response). The header that specifically applies to New Relic is the Access-Control-Allow-Headers response header, which can include newrelic, traceparent, tracestate, or newrelic, traceparent, tracestate in its value depending on what tracing strategies you enabled in your APM-monitored application. You must configure your server to return this CORS header in its response. Example: Access-Control-Allow-Headers: newrelic, traceparent, tracestate Important New Relic cannot perform any validation to ensure the services on the other origins were configured correctly. If you're unsure about how to allow these headers, do not add cross-origin resources to the approved list in the New Relic UI. Enable cross-origin resource sharing Caution You should always try enabling CORS in a test environment before setting it up in production. To enable cross-origin resource sharing: Confirm that the services on the other origins are configured to accept the newrelic header using: Access-Control-Allow-Headers: newrelic, traceparent, tracestate (for details, see Risks and mitigations). Confirm that you meet the Browser monitoring requirements. Make sure you are in one.newrelic.com, and click Browser > (select an app) > Application settings. Turn on the Distributed tracing toggle if it's not already enabled. Turn on the Cross-origin resource sharing (CORS) toggle. Under Cross-origin resource sharing (CORS), add cross-origin resources to the approved list. Important Valid cross-origin resources must include: The prefix http:// or https:// The domain name The port number is not required unless it differs from the default for HTTP (port 80) or HTTPS (port 443). Select Save application settings to update the agent configuration. Redeploy the browser agent (either restarting the associated APM agent or updating the copy/paste browser installation). 
Find data Tips for finding and querying data: You can find end-user-originating traces in any New Relic One distributed tracing UI. In the distributed tracing UI, end-user spans are indicated with the icon. To see a span's attributes, select a span in the UI. Spans are reported as Span data, and can be queried in New Relic. Query tips: Query by browser app name by setting browserApp.name to the browser app name. Query for traces containing at least one browser app span with browserApp.name is not null. Query for traces containing at least one back-end app with appName is not null. Query for traces containing both browser and back-end spans by combining the two previous conditions. Troubleshooting If you don't see end-user spans, or are having other distributed tracing issues, see Troubleshooting.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 89.03682, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Browser data in distributed tracing", - "sections": "Browser data in distributed tracing", - "body": " with distributed tracing enabled: Browser agent version 1153 or higher Browser agent version 1158 or higher (required for cross-origin resource sharing) Browser agent version 1173 or higher (required for w3c trace context support) APM agent versions: C SDK 1.3 or higher Java 5.9.0 or higher PHP 9.4.0" - }, - "id": "6043fe6f196a6784f0960f53" - }, - { - "sections": [ - "Report traces via the Trace API (New Relic format)", - "Get started", - "Send sample trace payload (non-Infinite Tracing)", - "Tip", - "Trace API payload (New Relic format)", - "The Span object in the spans array", - "The common object (optional)", - "Highly recommended attributes", - "Reserved attributes", - "Other attributes", - "Explore more about distributed tracing:" - ], - "title": "Report traces via the Trace API (New Relic format)", - "type": "docs", - "tags": [ - "Trace API", - "Distributed tracing", - "Understand dependencies" - ], - "external_id": "7b6be23c78b9a06ebf71671cc69590b4ac4b3311", - "image": "", - "url": "https://docs.newrelic.com/docs/distributed-tracing/trace-api/report-new-relic-format-traces-trace-api/", - "published_at": "2022-02-14T03:18:13Z", - "updated_at": "2022-02-14T03:18:13Z", - "document_type": "page", - "popularity": 1, - "body": "If you want to create your own tracing implementation, you can use our Trace API. This doc explains how to send traces in our general format, aka new-relic format. (To send Zipkin-format data, see Zipkin.) Get started Using our Trace API is as simple as: Sending trace data in the expected format (in this case, our new-relic format). Sending that data to the appropriate endpoint. Before using the Trace API, you should decide whether you want to use Infinite Tracing. To learn more about this, see Intro to Infinite Tracing and Sampling considerations. To get started using the Trace API, follow one of these paths: Want to use Infinite Tracing? Follow the Set up a trace observer instructions. That walks you through creating a trace observer and sending a sample payload to the trace observer endpoint. Don't want Infinite Tracing? See how to send a sample payload (below). Send sample trace payload (non-Infinite Tracing) The following explains how to send a standard (non-Infinite Tracing) payload to the Trace API using our newrelic format. Get a license key for the account you want to report data to. 
Insert that key into the following JSON and then send the JSON to our endpoint. Note: if you have a EU New Relic account, use the EU endpoint instead. curl -i -H 'Content-Type: application/json' \\ -H 'Api-Key: $YOUR_LICENSE_KEY' \\ -H 'Data-Format: newrelic' \\ -H 'Data-Format-Version: 1' \\ -X POST \\ -d '[ { \"common\": { \"attributes\": { \"service.name\": \"Test Service A\", \"host\": \"host123.example.com\" } }, \"spans\": [ { \"trace.id\": \"123456\", \"id\": \"ABC\", \"attributes\": { \"duration.ms\": 12.53, \"name\": \"/home\" } }, { \"trace.id\": \"123456\", \"id\": \"DEF\", \"attributes\": { \"error.message\": \"Invalid credentials\", \"service.name\": \"Test Service A\", \"host\": \"host456.example.com\", \"duration.ms\": 2.97, \"name\": \"/auth\", \"parent.id\": \"ABC\" } } ] } ]' 'https://trace-api.newrelic.com/trace/v1' Copy Tip If you're sending more than one POST, change the trace.id to a unique value. Sending the same payload or span id multiple times for the same trace.id may result in fragmented traces in the UI. If your test returned HTTP/1.1 202 Accepted, go to our UI to see a query of your test data using the span attribute service.name = Test Service A. Tip Traces may take up to one minute to be processed by both the trace observer and the Trace API. Trace API payload (New Relic format) The Trace API JSON payload is an array of objects, with each object representing a single trace. Each of these objects requires a spans key and may also include a common key. spans (required) contains an array of objects, with each object representing a span. common (optional) shares information across multiple spans. The Span object in the spans array field type description required default id string Unique identifier for this span. yes N/A trace.id string Unique identifier shared by all spans within a single trace. yes N/A timestamp long Span start time in milliseconds since the Unix epoch. no Current time in UTC time zone attributes object Any set of key: value pairs that add more details about a span. duration.ms, name, and parent.id are strongly recommended to add. no N/A Requests without the required keys above will be rejected, and an NrIntegrationError will be generated. The common object (optional) field type description required default attributes object Any set of key: value pairs that add common details about spans in the payload. If a span contains an attribute that has been set in common, the key in the span attributes object will take precedence. duration.ms, name, and parent.id are strongly recommended to add. no N/A Highly recommended attributes While not required, these attributes should be included for the best experience with your data in the attributes object for each span. attribute default description duration.ms float none Duration of this span in milliseconds. name string none The name of this span. parent.id string none The id of the caller of this span. Value is null if this is the root span. Traces without a root span will not be displayed. service.name string none The name of the entity that created this span. Reserved attributes These attributes are currently reserved for internal New Relic usage. While they are not explicitly blocked, we recommend not using them. attribute default description entity.name string service.name This is derived from the service.name attribute. entity.type string service The entity type is assumed to be a service. 
entity.guid string None The entity.guid is a derived value that uniquely identifies the entity in New Relic's backend. Other attributes You can add any arbitrary attributes you want in the attributes object in either common or each span object, with the exception of the restricted attributes. For example, you might want to add attributes like customer.id or user.id to help you analyze your trace data. Requirements and guidelines for trace JSON using the newrelic format: Each JSON payload is an array of objects. Each object should contain a required spans key. Each object can contain an optional common key. Use this if you want to share information across multiple spans in a object. Any keys on a span have precedence over the same key in the common block. The value for a spans key is a list of span objects. Certain attributes are required, and must be included either in the optional common block, or in each span. Recommended and custom attributes can be optionally included in a list of key-value pairs under a key named attributes, in the optional common block and/or in each span. In the following example POST, there are two spans, both of which have the trace.id 12345 and the custom attribute host: host123.example.com. The first span has no parent.id, so that is the root of the trace; the second span's parent.id points to the ID of the first. [ { \"common\": { \"attributes\": { \"host\": \"host123.example.com\" } }, \"spans\": [ { \"trace.id\": \"12345\", \"id\": \"abc\", \"timestamp\": 1603336834823, \"attributes\": { \"user.email\": \"bob@newr.com\", \"service.name\": \"my-service\", \"duration.ms\": 750, \"name\": \"my-span\" } }, { \"trace.id\": \"12345\", \"id\": \"def\", \"timestamp\": 1603336834899, \"attributes\": { \"parent.id\": \"abc\", \"service.name\": \"second-service\", \"duration.ms\": 750, \"name\": \"second-span\" } } ] } ] Copy To learn how to control how spans appear in New Relic (for example, adding errors or setting a span as a datastore span), see Decorate spans. Explore more about distributed tracing: Learn where Trace API data shows up in the UI. Learn how to decorate spans for a richer, more detailed UI experience. For example, you can have spans show up as datastore spans or display errors. Learn about general data limits, required metadata, and response validation. If you don't see your trace data, see Troubleshooting.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.49199, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Report traces via the Trace API (New Relic format)", - "sections": "Send sample trace payload (non-Infinite Tracing)", - "tags": "Trace API", - "body": "If you want to create your own tracing implementation, you can use our Trace API. This doc explains how to send traces in our general format, aka new-relic format. (To send Zipkin-format data, see Zipkin.) Get started Using our Trace API is as simple as: Sending trace data in the expected format" - }, - "id": "6071cfc8196a6790e864a7a4" - }, { "sections": [ "External services", @@ -85125,7 +84931,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.932175, + "_score": 85.41547, "_version": null, "_explanation": null, "sort": null, @@ -85134,6 +84940,117 @@ "body": " services does not include some back-end components such as MemCache and the database itself. The external services feature is not just for New Relic APM agent users. 
It also includes OpenTelemetry and uses the power of distributed tracing to give you insights into service activity. The classic external" }, "id": "61f27291e7b9d29cf2bb1ea9" + }, + { + "image": "https://docs.newrelic.com/static/6decabb9d8cd5dc3e18f2f647f7c7cdd/c1b63/arrow-step-diagram-trans.png", + "url": "https://docs.newrelic.com/docs/distributed-tracing/concepts/quick-start/", + "sections": [ + "Distributed tracing setup options" + ], + "published_at": "2022-02-14T03:23:44Z", + "title": "Distributed tracing setup options", + "updated_at": "2022-02-14T03:23:44Z", + "type": "docs", + "external_id": "44df1a2d07693a41fa23c9bba9473ce8ebabe47e", + "document_type": "page", + "popularity": 1, + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. Ready to get started? If you don't already have one, sign up for a New Relic account. It's free, forever! To set up distributed tracing, you'll complete three general steps: Identify services: Identify and write down the endpoints, services, languages, and systems that are used to complete this request (you'll need this information in the next step). If you have an environment diagram like the following, you could use it to create a list of services handling requests: Instrument services: Instrument each service you identify so it can send your trace data. Some tools, such as APM agents, instrument services automatically, while other tools require you to insert some code in the services. Click the icon below for instrumentation steps: Android mobile monitoring APM: C APM: Golang APM: Java APM: .NET APM: Node.js APM: PHP APM: Python APM: Ruby AWS Lambda Functions AWS X-Ray Browser monitoring iOS mobile monitoring Kamon OpenTelemetry Trace API: generic format Trace API: Zipkin format View traces: After you instrument the services, generate some traffic in your application, and then go to the New Relic UI to see your trace data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 84.08655, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Distributed tracing setup options", + "sections": "Distributed tracing setup options", + "body": "We recommend you do an initial setup of distributed tracing and consider the advanced Infinite Tracing feature if you are not getting the data you need. Also, if you are currently using New Relic APM agents and would like to enable distributed tracing, see our planning guide. 
Ready to get started" + }, + "id": "61d8b6a664441fbe9700cc16" + }, + { + "sections": [ + "Report traces via the Trace API (New Relic format)", + "Get started", + "Send sample trace payload (non-Infinite Tracing)", + "Tip", + "Trace API payload (New Relic format)", + "The Span object in the spans array", + "The common object (optional)", + "Highly recommended attributes", + "Reserved attributes", + "Other attributes", + "Explore more about distributed tracing:" + ], + "title": "Report traces via the Trace API (New Relic format)", + "type": "docs", + "tags": [ + "Trace API", + "Distributed tracing", + "Understand dependencies" + ], + "external_id": "7b6be23c78b9a06ebf71671cc69590b4ac4b3311", + "image": "", + "url": "https://docs.newrelic.com/docs/distributed-tracing/trace-api/report-new-relic-format-traces-trace-api/", + "published_at": "2022-02-14T03:18:13Z", + "updated_at": "2022-02-14T03:18:13Z", + "document_type": "page", + "popularity": 1, + "body": "If you want to create your own tracing implementation, you can use our Trace API. This doc explains how to send traces in our general format, aka new-relic format. (To send Zipkin-format data, see Zipkin.) Get started Using our Trace API is as simple as: Sending trace data in the expected format (in this case, our new-relic format). Sending that data to the appropriate endpoint. Before using the Trace API, you should decide whether you want to use Infinite Tracing. To learn more about this, see Intro to Infinite Tracing and Sampling considerations. To get started using the Trace API, follow one of these paths: Want to use Infinite Tracing? Follow the Set up a trace observer instructions. That walks you through creating a trace observer and sending a sample payload to the trace observer endpoint. Don't want Infinite Tracing? See how to send a sample payload (below). Send sample trace payload (non-Infinite Tracing) The following explains how to send a standard (non-Infinite Tracing) payload to the Trace API using our newrelic format. Get a license key for the account you want to report data to. Insert that key into the following JSON and then send the JSON to our endpoint. Note: if you have a EU New Relic account, use the EU endpoint instead. curl -i -H 'Content-Type: application/json' \\ -H 'Api-Key: $YOUR_LICENSE_KEY' \\ -H 'Data-Format: newrelic' \\ -H 'Data-Format-Version: 1' \\ -X POST \\ -d '[ { \"common\": { \"attributes\": { \"service.name\": \"Test Service A\", \"host\": \"host123.example.com\" } }, \"spans\": [ { \"trace.id\": \"123456\", \"id\": \"ABC\", \"attributes\": { \"duration.ms\": 12.53, \"name\": \"/home\" } }, { \"trace.id\": \"123456\", \"id\": \"DEF\", \"attributes\": { \"error.message\": \"Invalid credentials\", \"service.name\": \"Test Service A\", \"host\": \"host456.example.com\", \"duration.ms\": 2.97, \"name\": \"/auth\", \"parent.id\": \"ABC\" } } ] } ]' 'https://trace-api.newrelic.com/trace/v1' Copy Tip If you're sending more than one POST, change the trace.id to a unique value. Sending the same payload or span id multiple times for the same trace.id may result in fragmented traces in the UI. If your test returned HTTP/1.1 202 Accepted, go to our UI to see a query of your test data using the span attribute service.name = Test Service A. Tip Traces may take up to one minute to be processed by both the trace observer and the Trace API. Trace API payload (New Relic format) The Trace API JSON payload is an array of objects, with each object representing a single trace. 
Each of these objects requires a spans key and may also include a common key. spans (required) contains an array of objects, with each object representing a span. common (optional) shares information across multiple spans. The Span object in the spans array field type description required default id string Unique identifier for this span. yes N/A trace.id string Unique identifier shared by all spans within a single trace. yes N/A timestamp long Span start time in milliseconds since the Unix epoch. no Current time in UTC time zone attributes object Any set of key: value pairs that add more details about a span. duration.ms, name, and parent.id are strongly recommended to add. no N/A Requests without the required keys above will be rejected, and an NrIntegrationError will be generated. The common object (optional) field type description required default attributes object Any set of key: value pairs that add common details about spans in the payload. If a span contains an attribute that has been set in common, the key in the span attributes object will take precedence. duration.ms, name, and parent.id are strongly recommended to add. no N/A Highly recommended attributes While not required, these attributes should be included for the best experience with your data in the attributes object for each span. attribute type default description duration.ms float none Duration of this span in milliseconds. name string none The name of this span. parent.id string none The id of the caller of this span. Value is null if this is the root span. Traces without a root span will not be displayed. service.name string none The name of the entity that created this span. Reserved attributes These attributes are currently reserved for internal New Relic usage. While they are not explicitly blocked, we recommend not using them. attribute type default description entity.name string service.name This is derived from the service.name attribute. entity.type string service The entity type is assumed to be a service. entity.guid string None The entity.guid is a derived value that uniquely identifies the entity in New Relic's backend. Other attributes You can add any arbitrary attributes you want in the attributes object in either common or each span object, with the exception of the restricted attributes. For example, you might want to add attributes like customer.id or user.id to help you analyze your trace data. Requirements and guidelines for trace JSON using the newrelic format: Each JSON payload is an array of objects. Each object should contain a required spans key. Each object can contain an optional common key. Use this if you want to share information across multiple spans in an object. Any keys on a span have precedence over the same key in the common block. The value for a spans key is a list of span objects. Certain attributes are required, and must be included either in the optional common block, or in each span. Recommended and custom attributes can be optionally included in a list of key-value pairs under a key named attributes, in the optional common block and/or in each span. In the following example POST, there are two spans, both of which have the trace.id 12345 and the custom attribute host: host123.example.com. The first span has no parent.id, so that is the root of the trace; the second span's parent.id points to the ID of the first.
[ { \"common\": { \"attributes\": { \"host\": \"host123.example.com\" } }, \"spans\": [ { \"trace.id\": \"12345\", \"id\": \"abc\", \"timestamp\": 1603336834823, \"attributes\": { \"user.email\": \"bob@newr.com\", \"service.name\": \"my-service\", \"duration.ms\": 750, \"name\": \"my-span\" } }, { \"trace.id\": \"12345\", \"id\": \"def\", \"timestamp\": 1603336834899, \"attributes\": { \"parent.id\": \"abc\", \"service.name\": \"second-service\", \"duration.ms\": 750, \"name\": \"second-span\" } } ] } ] Copy To learn how to control how spans appear in New Relic (for example, adding errors or setting a span as a datastore span), see Decorate spans. Explore more about distributed tracing: Learn where Trace API data shows up in the UI. Learn how to decorate spans for a richer, more detailed UI experience. For example, you can have spans show up as datastore spans or display errors. Learn about general data limits, required metadata, and response validation. If you don't see your trace data, see Troubleshooting.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 83.79241, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Report traces via the Trace API (New Relic format)", + "sections": "Send sample trace payload (non-Infinite Tracing)", + "tags": "Trace API", + "body": "If you want to create your own tracing implementation, you can use our Trace API. This doc explains how to send traces in our general format, aka new-relic format. (To send Zipkin-format data, see Zipkin.) Get started Using our Trace API is as simple as: Sending trace data in the expected format" + }, + "id": "6071cfc8196a6790e864a7a4" + }, + { + "image": "", + "url": "https://docs.newrelic.com/docs/browser/new-relic-browser/browser-pro-features/browser-data-distributed-tracing/", + "sections": [ + "Browser data in distributed tracing", + "Benefits of browser data in distributed tracing", + "Requirements and compatibility", + "Enable distributed tracing", + "Enable Infinite Tracing", + "Enable cross-origin resource sharing (CORS)", + "Risks and mitigations", + "Caution", + "Important", + "Enable cross-origin resource sharing", + "Find data", + "Troubleshooting" + ], + "published_at": "2022-02-14T09:10:27Z", + "title": "Browser data in distributed tracing", + "updated_at": "2022-02-14T09:10:27Z", + "type": "docs", + "external_id": "fe29e64d44406955651c3644eabb3ddb6d2a66c0", + "document_type": "page", + "popularity": 1, + "body": "If you use browser to monitor end-user browser activity, you can now see end-user-originating browser-side traces in distributed tracing. This document contains: Benefits of this feature Requirements Enable distributed tracing Enable cross-origin resource sharing (CORS) Find and query data Troubleshooting Benefits of browser data in distributed tracing By enabling New Relic to report browser data to distributed tracing, you can see the connection between front-end activity and back-end activity. You can see across a full transaction, from time spent by an end user in the web browser, to network activity, to associated back-end services. Benefits of this feature: Quickly spot latencies, errors, and anomalies in the browser or network Resolve customer-facing problems more quickly All the benefits of distributed tracing applied to your end-user monitoring Requirements and compatibility This feature reports AJAX requests (Fetch and XHR) that occur during a browser interaction. 
By default, only single-origin requests are monitored unless cross-origin resource sharing is enabled. Make sure you have the necessary minimum versions for your browser and APM agents: Browser Pro+SPA agent with distributed tracing enabled: Browser agent version 1153 or higher Browser agent version 1158 or higher (required for cross-origin resource sharing) Browser agent version 1173 or higher (required for w3c trace context support) APM agent versions: C SDK 1.3 or higher Java 5.9.0 or higher PHP 9.4.0 or higher Other APM agent version requirements Enable distributed tracing To enable distributed tracing for browser monitoring: Make sure you meet requirements. Go to one.newrelic.com, and click Browser, then select an app, then on the left side, click Application settings. Ensure the Distributed tracing toggle is on. By default, for agent version 1173 and above, the newrelic, traceparent, and tracestate headers will be added to all same-origin AJAX requests. Optional: If all of your services are configured to use the w3c trace context headers, you can choose to exclude the newrelic, traceparent, and tracestate headers from requests. Optional: Enable cross-origin resource sharing. Redeploy the browser monitoring agent (either restarting the associated APM agent or updating the copy/paste browser installation). If you have some apps or services that are downstream from your browser app that use the advanced option Infinite Tracing, see these setup steps. Enable Infinite Tracing If your browser apps have some downstream services that use the advanced type of distributed tracing called Infinite Tracing, you should enable this feature for your browser apps. This ensures that your root span (the initiating browser app) is included in the rest of the spans tracked by Infinite Tracing. To set up Infinite Tracing: Complete the steps above to enable distributed tracing. Go to our Infinite Tracing documentation for steps to create a trace observer and to select which apps (data sources) should send trace spans to the Infinite Tracing trace observer. Enable cross-origin resource sharing (CORS) If you have AJAX requests that need resources from different origins, you can enable cross-origin resource sharing (CORS). By default, distributed tracing for cross-origin requests is not enabled because of browser CORS security restrictions: Distributed tracing is implemented by adding custom HTTP headers (newrelic, traceparent, and tracestate) to outgoing AJAX requests, and browsers typically do not allow custom headers on cross-origin requests. With the release of agent version 1173, we now support the w3c trace context headers (traceparent and tracestate), so these should also be allowed in your configuration. There are two separate configurations required to enable cross-origin distributed tracing: Configure the service on the different origin to accept the newrelic custom header Configure browser monitoring to include the target origin in distributed tracing Our step-by-step instructions provide key concepts and steps to enable this feature, but if you need more background about how cross-origin resource sharing works, we recommend this Mozilla developer document. Risks and mitigations Caution Cross-origin resource sharing can expose you to a high level of risk if the services on the different origins are not configured correctly.
The AJAX requests will likely return an error, resulting in a variety of failures, including: Resources failing to load (for example, images and key content) Login failures Entire site outages (depending on type of requests enabled) By enabling this cross-origin resource sharing feature, you are acknowledging the following: You understand that this feature is optional and not mandatory. You understand the steps you need to take in order to enable this feature for your services and your domains. You understand that if you do not properly configure your services prior to deployment (including but not limited to configuring your services on your domains to accept custom headers), portions or all of your website will likely malfunction. You understand that New Relic is neither responsible nor liable for errors or issues related to your misconfiguration of servers or services. You fully and solely accept the risks and wish to proceed. The best way to minimize your risk is to ensure you fully understand the process and to try it first in a test environment. Before reading the step-by-step instructions, it may help to first read this overview of the process: To use distributed tracing with cross-origin resources, you populate a list of approved cross-origin resources in New Relic, and then we automatically send the following custom headers to those resources: newrelic, traceparent, and tracestate. For this process to work, you must first ensure that someone has configured the services on the other origins to accept these custom headers. Cross-origin resource sharing uses a variety of HTTP headers (both in the request and response). The header that specifically applies to New Relic is the Access-Control-Allow-Headers response header, which can include newrelic, the w3c headers traceparent and tracestate, or all three in its value, depending on which tracing strategies you enabled in your APM-monitored application. You must configure your server to return this CORS header in its response. Example: Access-Control-Allow-Headers: newrelic, traceparent, tracestate Important New Relic cannot perform any validation to ensure the services on the other origins were configured correctly. If you're unsure about how to allow these headers, do not add cross-origin resources to the approved list in the New Relic UI. Enable cross-origin resource sharing Caution You should always try enabling CORS in a test environment before setting it up in production. To enable cross-origin resource sharing: Confirm that the services on the other origins are configured to accept the newrelic header using: Access-Control-Allow-Headers: newrelic, traceparent, tracestate (for details, see Risks and mitigations). Confirm that you meet the Browser monitoring requirements. Make sure you are in one.newrelic.com, and click Browser > (select an app) > Application settings. Turn on the Distributed tracing toggle if it's not already enabled. Turn on the Cross-origin resource sharing (CORS) toggle. Under Cross-origin resource sharing (CORS), add cross-origin resources to the approved list. Important Valid cross-origin resources must include: The prefix http:// or https:// The domain name The port number is not required unless it differs from the default for HTTP (port 80) or HTTPS (port 443). Select Save application settings to update the agent configuration. Redeploy the browser agent (either restarting the associated APM agent or updating the copy/paste browser installation).
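Before relying on the CORS toggle, a quick way to confirm the first step (that the other origins accept the headers) is to simulate the browser's preflight request with curl; both origin URLs below are hypothetical placeholders:

```bash
# Simulate the CORS preflight a browser would send for a cross-origin AJAX call.
# Both URLs are placeholders for your own origins.
curl -i -X OPTIONS 'https://api.other-origin.example/resource' \
  -H 'Origin: https://app.your-origin.example' \
  -H 'Access-Control-Request-Method: GET' \
  -H 'Access-Control-Request-Headers: newrelic,traceparent,tracestate'
# A correctly configured service responds with a header such as:
#   Access-Control-Allow-Headers: newrelic, traceparent, tracestate
```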
Find data Tips for finding and querying data: You can find end-user-originating traces in any New Relic One distributed tracing UI. In the distributed tracing UI, end-user spans are indicated with the icon. To see a span's attributes, select a span in the UI. Spans are reported as Span data, and can be queried in New Relic. Query tips: Query by browser app name by setting browserApp.name to the browser app name. Query for traces containing at least one browser app span with browserApp.name is not null. Query for traces containing at least one back-end app with appName is not null. Query for traces containing both browser and back-end spans by combining the two previous conditions. Troubleshooting If you don't see end-user spans, or are having other distributed tracing issues, see Troubleshooting.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 83.75699, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Browser data in distributed tracing", + "sections": "Browser data in distributed tracing", + "body": " with distributed tracing enabled: Browser agent version 1153 or higher Browser agent version 1158 or higher (required for cross-origin resource sharing) Browser agent version 1173 or higher (required for w3c trace context support) APM agent versions: C SDK 1.3 or higher Java 5.9.0 or higher PHP 9.4.0" + }, + "id": "6043fe6f196a6784f0960f53" } ], "/snmp/9eb6470e-fffd-4f16-b29a-9c78ae43f0e6": [ @@ -85161,7 +85078,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.995674, + "_score": 126.224014, "_version": null, "_explanation": null, "sort": null, @@ -85171,54 +85088,6 @@ }, "id": "61566dd764441f9769099673" }, - { - "sections": [ - "Alerts for infrastructure: Add, edit, or view host alert information", - "Create alert conditions for infrastructure", - "Important", - "Other infrastructure alert condition methods", - "Use the Alerts UI", - "Use the Infrastructure UI", - "Use infrastructure settings for integrations", - "Tip", - "View host alert events", - "Update or delete host alert information", - "Use New Relic Alerts to monitor your entire infrastructure", - "Add a description", - "Add or edit a runbook URL", - "Violation time limit for violations", - "Alert conditions that generate too-long NRQL queries" - ], - "title": "Alerts for infrastructure: Add, edit, or view host alert information", - "type": "docs", - "tags": [ - "Infrastructure alert conditions", - "Infrastructure alerts", - "Infrastructure" - ], - "external_id": "00207a1020aa29ea6d5d5bbb8e806a50a5966f80", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/new-relic-infrastructure/infrastructure-alert-conditions/infrastructure-alerts-add-edit-or-view-host-alert-information/", - "published_at": "2022-02-14T10:16:12Z", - "updated_at": "2022-02-14T10:16:12Z", - "document_type": "page", - "popularity": 1, - "body": "With New Relic's infrastructure monitoring, you can create alert conditions directly within the context of what you're currently monitoring with New Relic. For example, if you're monitoring a filter set and notice a problem, you can create an alert directly, and you don't need to recreate it from Alerts. To create the alert, select your filter set immediately, and tailor the alert condition directly from the chart you're viewing. This helps you proactively manage and monitor the alerting system for your environment. 
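Returning to the Find data tips in the browser distributed tracing doc above, two hedged NRQL sketches against the Span event type; the 1-hour window is arbitrary:

```
FROM Span SELECT count(*) WHERE browserApp.name IS NOT NULL FACET browserApp.name SINCE 1 hour ago
FROM Span SELECT count(*) WHERE appName IS NOT NULL FACET appName SINCE 1 hour ago
```

Filtering both conditions on a single trace.id surfaces traces that contain both browser and back-end spans, per the combined-query tip above.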
Any alert violations will be created per entity within the filter set. Want to try out alert conditions with our infrastructure agent? Create a New Relic account for free! No credit card required. Create alert conditions for infrastructure Alert conditions apply to alert policies. You can select an existing policy or create a new policy with email notifications from the Infrastructure monitoring UI. If you want to use other types of notification channels, create a new policy from within the Alerts UI. Important The Infrastructure REST API has a limit of 3,700 alert conditions, including both active and disabled conditions. The API, whether used directly or via the UI, will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Type a meaningful condition name. Select the Alert type, or refer to the examples to decide which type to select. Create individual filters, or copy all the filters from a filter set to identify the hosts that you want the alert condition to use. Important For more information about the rules behind filters, see Filter set logic. Define the Critical (required) and Warning (optional, if available) thresholds for triggering the alert notification. Optional: To create the condition criteria proactively but not receive alert notifications at this time, turn off the Enabled checkbox option. Select an existing policy for the new condition. OR Select the option to create a new policy and identify the email for alert notifications. Optional: Add a runbook url. Optional: Set Violation time limit for violations (this defaults to 24 hours). Select Create. Important If New Relic hasn't received a cloud integration service's attribute in the past 60 minutes, we refer to this as a \"silent attribute,\" and it won't be available to use as an alert condition in the UI. In this situation, you can use the API to create alert conditions for silent attributes. Other infrastructure alert condition methods You can also use these other methods to create an infrastructure alert condition: Use the Alerts UI Go to one.newrelic.com > Alerts & AI > Alerts > Alert policies > New alert policy > Create new condition, then select Infrastructure as the product. Use the Infrastructure UI Go to one.newrelic.com > Infrastructure. Select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Use infrastructure settings for integrations Tip Use this method to create an alert condition for infrastructure integrations. Go to one.newrelic.com > Infrastructure > Settings > Alerts, and then click Create alert condition. Name and describe the alert condition. Click the Integrations alert type, and then select the integration data source you'd like to use. Use the Filter entities dropdown to limit your condition to specific attributes. Use the Define thresholds dropdowns to define your condition's thresholds, and then click Create. The configuration settings are optional. You can always update them later. View host alert events Anyone included in the policy's notification channels receive alert notifications directly. 
In addition, anyone with permissions for your New Relic account can view Infrastructure alert incidents and individual violations through the user interface. Go to one.newrelic.com > Infrastructure > Events. To change the hosts or time frame, use the search window, Filter set, or Time functions. From the Events list, select the alert violation. To view detailed information in Alerts about the selected violation, select the link. Update or delete host alert information To edit, disable (or re-enable), or delete host alert information: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Use the search window or Select all checkbox to locate one or more alert conditions. Select any of the available functions to edit, disable, enable, or delete the selected conditions. Use New Relic Alerts to monitor your entire infrastructure New Relic Alerts provides a single, coordinated alerting tool across all of your New Relic products. This allows you to manage alert policies and conditions that focus on the metrics for entities that you care about the most, such as Docker containers, JVMs, and more. Alert features Features in Infrastructure Alert conditions Create: Use the Infrastructure UI. View, change, disable (or re-enable), or delete: Use the Infrastructure Settings > Alerts UI. Information on alerts View summary information about events: Use the Infrastructure Events UI. View detailed information about alert incidents or individual violations: Use the Alerts UI or the notification channel integrated with the associated policy. Alert policies View, add, change, disable, or delete: For policies with a variety of notification channels: Use the Alerts UI. For policies only needing email notifications: Go to one.newrelic.com > Infrastructure > Settings > Alerts > Create a new policy, and add one or more email addresses as needed. Add host conditions to an existing policy: Use the Infrastructure UI. Notification channels To view, add, change, or delete available notification options: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Search for the condition or policy name. From the list of conditions, select the policy link to view notification channel information in the Alerts UI. Add a description The use of the Description field is available for these alert condition types: NRQL conditions: add a description using the NerdGraph API. Infrastructure conditions: add a description using the UI or the REST API. The text you place in an alert condition's Description field is passed downstream to associated violations and notifications. A description can be used for several purposes, including: Capturing the reason for the alert condition. Defining the signal being monitored. Defining next steps. Add metadata to downstream systems. You can use template substitution to insert values from the attributes in the associated violation event. The template format is {{attributeName}}. For the attributes you can use when creating a description, see Violation event attributes. One available attribute is the special {{tag.*}} attribute. This attribute prefix is used to access any of the tag values that are included with the target signal, or any of the entity tags that are associated with the target signal. If there are entity tags associated with your violation, then they can be accessed using the entity tag name. An example of this would be {{tag.aws.awsRegion}}. 
When entity tags are available to use, you see them included with the violation, and displayed when you view the violations in an incident. This field has a maximum character size of 4,000. Add or edit a runbook URL The alert condition creation process includes an option for setting a URL for runbook instructions. This lets you link to information or standard procedures for handling a violation. Before adding or updating the link, make sure you use a valid URL. To add, update, or delete an alert condition's runbook URL: Select an alert condition, and make changes to the Runbook URL link. Save the condition. In order to be saved, the URL must be a valid URL. Violation time limit for violations The violation time limit allows you to define a time period after which violations will be force-closed. By default, violation time limit is 24 hours. To add or update an alert condition's violation time limit: Select an alert condition, and make changes to the violation time limit. Save the condition. Alert conditions that generate too-long NRQL queries Alert conditions created for infrastructure rely on behind-the-scenes NRQL queries, and NRQL queries have a 4096-character limit. This means that if your condition generates a very complex NRQL query that filters on many elements (for example, including many hosts or many tags), it will exceed this limit and display an error message saying that the condition failed. To solve this problem, reduce the number of elements you are using in your alert condition. For example: Problem Solution Hosts If you entered a large number of hosts that caused the condition to fail, reduce the number of hosts. Use substrings to target hosts. For example, instead of targeting prod-host-01, prod-host-02, and prod-host-03, just target all hosts with prod-host-0 in the name. Entities Edit your alert condition to target specific attributes that apply to the entities you're trying to target. Create custom attributes for the entities you want to target, and use those attributes in your alert condition. For more information, see Best practices for filtering in infrastructure alerts in New Relic's Explorers Hub.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.161545, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Alerts for infrastructure: Add, edit, or view host alert information", - "sections": "Use New Relic Alerts to monitor your entire infrastructure", - "tags": "Infrastructure alert conditions", - "body": ", will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. 
To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network" - }, - "id": "6043fa3428ccbc401d2c60b9" - }, { "sections": [ "Default infrastructure monitoring data", @@ -85257,7 +85126,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.07054, + "_score": 83.634735, "_version": null, "_explanation": null, "sort": null, @@ -85306,7 +85175,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.97718, + "_score": 80.729, "_version": null, "_explanation": null, "sort": null, @@ -85318,6 +85187,54 @@ }, "id": "61b9389528ccbcb4d396ee5e" }, + { + "sections": [ + "Alerts for infrastructure: Add, edit, or view host alert information", + "Create alert conditions for infrastructure", + "Important", + "Other infrastructure alert condition methods", + "Use the Alerts UI", + "Use the Infrastructure UI", + "Use infrastructure settings for integrations", + "Tip", + "View host alert events", + "Update or delete host alert information", + "Use New Relic Alerts to monitor your entire infrastructure", + "Add a description", + "Add or edit a runbook URL", + "Violation time limit for violations", + "Alert conditions that generate too-long NRQL queries" + ], + "title": "Alerts for infrastructure: Add, edit, or view host alert information", + "type": "docs", + "tags": [ + "Infrastructure alert conditions", + "Infrastructure alerts", + "Infrastructure" + ], + "external_id": "00207a1020aa29ea6d5d5bbb8e806a50a5966f80", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/new-relic-infrastructure/infrastructure-alert-conditions/infrastructure-alerts-add-edit-or-view-host-alert-information/", + "published_at": "2022-02-14T10:16:12Z", + "updated_at": "2022-02-14T10:16:12Z", + "document_type": "page", + "popularity": 1, + "body": "With New Relic's infrastructure monitoring, you can create alert conditions directly within the context of what you're currently monitoring with New Relic. For example, if you're monitoring a filter set and notice a problem, you can create an alert directly, and you don't need to recreate it from Alerts. To create the alert, select your filter set immediately, and tailor the alert condition directly from the chart you're viewing. This helps you proactively manage and monitor the alerting system for your environment. Any alert violations will be created per entity within the filter set. Want to try out alert conditions with our infrastructure agent? Create a New Relic account for free! No credit card required. Create alert conditions for infrastructure Alert conditions apply to alert policies. You can select an existing policy or create a new policy with email notifications from the Infrastructure monitoring UI. If you want to use other types of notification channels, create a new policy from within the Alerts UI. Important The Infrastructure REST API has a limit of 3,700 alert conditions, including both active and disabled conditions. The API, whether used directly or via the UI, will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. 
Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Type a meaningful condition name. Select the Alert type, or refer to the examples to decide which type to select. Create individual filters, or copy all the filters from a filter set to identify the hosts that you want the alert condition to use. Important For more information about the rules behind filters, see Filter set logic. Define the Critical (required) and Warning (optional, if available) thresholds for triggering the alert notification. Optional: To create the condition criteria proactively but not receive alert notifications at this time, turn off the Enabled checkbox option. Select an existing policy for the new condition. OR Select the option to create a new policy and identify the email for alert notifications. Optional: Add a runbook URL. Optional: Set Violation time limit for violations (this defaults to 24 hours). Select Create. Important If New Relic hasn't received a cloud integration service's attribute in the past 60 minutes, we refer to this as a \"silent attribute,\" and it won't be available to use as an alert condition in the UI. In this situation, you can use the API to create alert conditions for silent attributes. Other infrastructure alert condition methods You can also use these other methods to create an infrastructure alert condition: Use the Alerts UI Go to one.newrelic.com > Alerts & AI > Alerts > Alert policies > New alert policy > Create new condition, then select Infrastructure as the product. Use the Infrastructure UI Go to one.newrelic.com > Infrastructure. Select any of these Infrastructure monitoring pages: Hosts, Processes, Network, or Storage. Mouse over the chart you want to alert on, select the ellipses icon, and then select Create alert. Use infrastructure settings for integrations Tip Use this method to create an alert condition for infrastructure integrations. Go to one.newrelic.com > Infrastructure > Settings > Alerts, and then click Create alert condition. Name and describe the alert condition. Click the Integrations alert type, and then select the integration data source you'd like to use. Use the Filter entities dropdown to limit your condition to specific attributes. Use the Define thresholds dropdowns to define your condition's thresholds, and then click Create. The configuration settings are optional. You can always update them later. View host alert events Anyone included in the policy's notification channels receives alert notifications directly. In addition, anyone with permissions for your New Relic account can view Infrastructure alert incidents and individual violations through the user interface. Go to one.newrelic.com > Infrastructure > Events. To change the hosts or time frame, use the search window, Filter set, or Time functions. From the Events list, select the alert violation. To view detailed information in Alerts about the selected violation, select the link. Update or delete host alert information To edit, disable (or re-enable), or delete host alert information: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Use the search window or Select all checkbox to locate one or more alert conditions. Select any of the available functions to edit, disable, enable, or delete the selected conditions. Use New Relic Alerts to monitor your entire infrastructure New Relic Alerts provides a single, coordinated alerting tool across all of your New Relic products.
This allows you to manage alert policies and conditions that focus on the metrics for entities that you care about the most, such as Docker containers, JVMs, and more. Alert features Features in Infrastructure Alert conditions Create: Use the Infrastructure UI. View, change, disable (or re-enable), or delete: Use the Infrastructure Settings > Alerts UI. Information on alerts View summary information about events: Use the Infrastructure Events UI. View detailed information about alert incidents or individual violations: Use the Alerts UI or the notification channel integrated with the associated policy. Alert policies View, add, change, disable, or delete: For policies with a variety of notification channels: Use the Alerts UI. For policies only needing email notifications: Go to one.newrelic.com > Infrastructure > Settings > Alerts > Create a new policy, and add one or more email addresses as needed. Add host conditions to an existing policy: Use the Infrastructure UI. Notification channels To view, add, change, or delete available notification options: Go to one.newrelic.com > Infrastructure > Settings > Alerts. Optional: Search for the condition or policy name. From the list of conditions, select the policy link to view notification channel information in the Alerts UI. Add a description The use of the Description field is available for these alert condition types: NRQL conditions: add a description using the NerdGraph API. Infrastructure conditions: add a description using the UI or the REST API. The text you place in an alert condition's Description field is passed downstream to associated violations and notifications. A description can be used for several purposes, including: Capturing the reason for the alert condition. Defining the signal being monitored. Defining next steps. Add metadata to downstream systems. You can use template substitution to insert values from the attributes in the associated violation event. The template format is {{attributeName}}. For the attributes you can use when creating a description, see Violation event attributes. One available attribute is the special {{tag.*}} attribute. This attribute prefix is used to access any of the tag values that are included with the target signal, or any of the entity tags that are associated with the target signal. If there are entity tags associated with your violation, then they can be accessed using the entity tag name. An example of this would be {{tag.aws.awsRegion}}. When entity tags are available to use, you see them included with the violation, and displayed when you view the violations in an incident. This field has a maximum character size of 4,000. Add or edit a runbook URL The alert condition creation process includes an option for setting a URL for runbook instructions. This lets you link to information or standard procedures for handling a violation. Before adding or updating the link, make sure you use a valid URL. To add, update, or delete an alert condition's runbook URL: Select an alert condition, and make changes to the Runbook URL link. Save the condition. In order to be saved, the URL must be a valid URL. Violation time limit for violations The violation time limit allows you to define a time period after which violations will be force-closed. By default, violation time limit is 24 hours. To add or update an alert condition's violation time limit: Select an alert condition, and make changes to the violation time limit. Save the condition. 
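As a concrete instance of the {{attributeName}} template substitution covered in the Add a description section above, a hypothetical condition description; only {{tag.aws.awsRegion}} is taken from the doc, the rest of the wording is illustrative:

```
High disk utilization detected in {{tag.aws.awsRegion}}. Signal: host storage sample. Next steps: follow the runbook URL on this condition before scaling storage.
```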
Alert conditions that generate too-long NRQL queries Alert conditions created for infrastructure rely on behind-the-scenes NRQL queries, and NRQL queries have a 4096-character limit. This means that if your condition generates a very complex NRQL query that filters on many elements (for example, including many hosts or many tags), it will exceed this limit and display an error message saying that the condition failed. To solve this problem, reduce the number of elements you are using in your alert condition. For example: Problem Solution Hosts If you entered a large number of hosts that caused the condition to fail, reduce the number of hosts. Use substrings to target hosts. For example, instead of targeting prod-host-01, prod-host-02, and prod-host-03, just target all hosts with prod-host-0 in the name. Entities Edit your alert condition to target specific attributes that apply to the entities you're trying to target. Create custom attributes for the entities you want to target, and use those attributes in your alert condition. For more information, see Best practices for filtering in infrastructure alerts in New Relic's Explorers Hub.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 80.60472, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "Alerts for infrastructure: Add, edit, or view host alert information", + "sections": "Use New Relic Alerts to monitor your entire infrastructure", + "tags": "Infrastructure alert conditions", + "body": ", will reject all requests to add any additional alert conditions beyond the 3,700 alert condition limit. To add an infrastructure alert condition to an alerts policy: Go to one.newrelic.com > Infrastructure, then select any of these Infrastructure monitoring pages: Hosts, Processes, Network" + }, + "id": "6043fa3428ccbc401d2c60b9" + }, { "sections": [ "AWS VPC", @@ -85346,7 +85263,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.13968, + "_score": 71.144165, "_version": null, "_explanation": null, "sort": null, @@ -85391,7 +85308,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 49.549957, + "_score": 48.569668, "_version": null, "_explanation": null, "sort": null, @@ -85423,7 +85340,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 40.134586, + "_score": 40.040745, "_version": null, "_explanation": null, "sort": null, @@ -85468,7 +85385,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 33.120075, + "_score": 32.837097, "_version": null, "_explanation": null, "sort": null, @@ -85513,7 +85430,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85554,7 +85471,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85596,7 +85513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85638,7 +85555,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 
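To make the substring approach above concrete, here is a minimal NRQL sketch of the kind of condition query that stays short (the prod-host-0 pattern is taken from the example above; the SystemSample event type and the cpuPercent and hostname attributes are assumed for illustration):

SELECT average(cpuPercent) FROM SystemSample WHERE hostname LIKE 'prod-host-0%' FACET hostname

A single LIKE clause stands in for dozens of exact hostname filters, which is what keeps the generated query under the 4096-character limit.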
84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85680,7 +85597,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85724,7 +85641,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85765,7 +85682,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85807,7 +85724,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85849,7 +85766,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85891,7 +85808,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85935,7 +85852,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -85976,7 +85893,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86018,7 +85935,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86060,7 +85977,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86102,7 +86019,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86150,7 +86067,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.161545, + "_score": 80.60472, "_version": null, "_explanation": null, "sort": null, @@ -86192,7 +86109,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.070465, + "_score": 68.373886, "_version": null, "_explanation": null, "sort": null, @@ -86249,7 +86166,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 60.366703, + "_score": 59.388115, "_version": null, "_explanation": null, "sort": null, @@ -86298,7 +86215,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.39953, + "_score": 56.21028, "_version": null, "_explanation": null, "sort": null, @@ -86333,7 +86250,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 59.397385, + "_score": 56.20854, "_version": null, "_explanation": null, "sort": null, @@ -86382,7 +86299,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 
98.27347, + "_score": 97.209465, "_version": null, "_explanation": null, "sort": null, @@ -86394,46 +86311,6 @@ }, "id": "617e9527e7b9d295d9c046b5" }, - { - "sections": [ - "Introduction to New Relic for Ruby", - "Compatibility and requirements", - "Monitor app performance", - "Install the agent", - "Extend agent instrumentation", - "Troubleshooting", - "Check the source code" - ], - "title": "Introduction to New Relic for Ruby", - "type": "docs", - "tags": [ - "Getting started", - "Ruby agent", - "Agents" - ], - "external_id": "78629823e3103cf758fabed8581980990b19008d", - "image": "", - "url": "https://docs.newrelic.com/docs/apm/agents/ruby-agent/getting-started/introduction-new-relic-ruby/", - "published_at": "2022-02-14T16:29:53Z", - "updated_at": "2022-01-27T07:16:16Z", - "document_type": "page", - "popularity": 1, - "body": "The New Relic Ruby agent monitors your applications to help you identify and solve performance issues. You can also extend the agent's performance monitoring to collect and analyze business data to help you improve the customer experience and make data-driven business decisions. Compatibility and requirements The Ruby agent supports many of the most common Ruby frameworks and platforms. You can also use the Ruby agent in a Google App Engine (GAE) flexible environment. Before you install the Ruby agent, ensure your system meets the system requirements. Monitor app performance View the big picture of your app Monitor your app's Apdex (user satisfaction). Get a high-level summary of your app. Create architectural maps of your app. Enable distributed tracing to understand activity in an environment that relies on many services. Install New Relic Infrastructure and view detailed host data for your app. Find errors and problems quickly Track key transactions. Create customized dashboards for important metrics. Alert your team when errors or problems occur before they affect your users. Track performance after a deployment. Drill down into performance details Examine code-level transaction traces Examine database query traces. Examine error traces. Monitor Ruby background processes and daemons. View logs for your APM and infrastructure data Bring your logs and application's data together to make troubleshooting easier and faster. No need to switch to another UI page in New Relic One. With logs in context, you can see log messages related to your errors and traces directly in your app's UI. You can also see logs in context of your infrastructure data, such as Kubernetes clusters. Analyze business data Use the Ruby agent to organize, query, and visualize your data to answer key questions about application performance and customer experience. Use default transaction attributes or add your own. Query your data using NRQL. Send your own event data. Create and share customizable, interactive dashboards. Install the agent After creating a New Relic account, use our launcher or see the installation instructions. Install docs for gem (recommended) Install docs for rails plugin Add Ruby data Extend agent instrumentation After installing the agent, go further and extend the agent's instrumentation: Page load timing: Automatically inject the browser monitoring agent to get visibility into end-user activity. Custom instrumentation: Instrument transactions not captured as part of our framework instrumentation. Agent API: Use the agent API to fully customize the agent's behavior. For example, you can collect custom metrics, flag an error, or ignore a particular transaction entirely. 
Custom metrics: Record additional metrics as part of a transaction to gain more insights into your app's performance and business data. Agent attributes: Customize the attributes attached to transactions. Customizing attributes allows you to avoid sending sensitive attributes, or to collect additional attributes for deeper visibility into your transactions. Troubleshooting If you're having problems, see the Ruby agent troubleshooting docs. Common issues include: No data appears (Ruby) Gems incompatible with the Ruby agent Sending handled errors to New Relic Controlling when the Ruby agent starts Check the source code The Ruby agent is open source software. That means you can browse its source code and send improvements, or create your own fork and build it. For more information, see the README.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.63069, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "Introduction to New Relic for Ruby", - "sections": "Introduction to New Relic for Ruby", - "tags": "Ruby agent", - "body": " transaction traces Examine database query traces. Examine error traces. Monitor Ruby background processes and daemons. View logs for your APM and infrastructure data Bring your logs and application's data together to make troubleshooting easier and faster. No need to switch to another UI page in New Relic" - }, - "id": "617eab27e7b9d2395fc044bc" - }, { "sections": [ "Net::HTTP", @@ -86466,7 +86343,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.074814, "_version": null, "_explanation": null, "sort": null, @@ -86507,7 +86384,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.074814, "_version": null, "_explanation": null, "sort": null, @@ -86549,7 +86426,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.074814, "_version": null, "_explanation": null, "sort": null, @@ -86558,6 +86435,48 @@ "body": " server designed to handle applications that expect long request/response times and/or slow clients. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Rainbows! with the New Relic Ruby agent, and allows you to further" }, "id": "6156689428ccbc41dcf2143a" + }, + { + "sections": [ + "HttpRb", + "What's included", + "Httprb", + "High CPU Utilization", + "Memory Usage", + "Apdex Score", + "Transaction Errors", + "HttpRb installation docs", + "What is HttpRb?", + "Get started!", + "More info" + ], + "title": "HttpRb", + "type": "quickstarts", + "tags": [ + "apm", + "ruby" + ], + "quick_start_name": "HttpRb", + "external_id": "5d27d2abc7e12eb66e6216eaf1f2911890a0e692", + "image": "", + "url": "https://developer.newrelic.com/instant-observability/httprb/18adf8b8-f2d2-47a5-b8a2-278f526b5243/", + "published_at": "2022-02-08T01:40:36Z", + "updated_at": "2021-11-30T01:47:44Z", + "document_type": "page", + "popularity": 1, + "body": "What's included Httprb Dashboard High CPU Utilization This alert is triggered when the CPU Utilization is above 90%. 
Alert Memory Usage This alert is triggered when Memory usage is above 90%. Alert Apdex Score This alert is triggered when the Apdex score is below 0.5 for 5 minutes. Alert Transaction Errors This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. Alert HttpRb installation docs Straightforward Ruby-based HTTP client with advanced feature set. Doc What is HttpRb? Straightforward Ruby-based HTTP client with advanced feature set. Get started! Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments HttpRb with the New Relic Ruby agent, and allows you to further leverage New Relic's APM capabilities by setting up custom dashboards , errors inbox , transaction tracing , and service maps . More info Check out the documentation to learn more about New Relic monitoring for HttpRb. HttpRb quickstart contains 1 dashboard . These interactive visualizations let you easily explore your data, understand context, and resolve problems faster. Httprb HttpRb observability quickstart contains 4 alerts . These alerts detect changes in key performance metrics. Integrate these alerts with your favorite tools (like Slack, PagerDuty, etc.) and New Relic will let you know when something needs your attention. High CPU Utilization Alert Type: STATIC This alert is triggered when the CPU Utilization is above 90%. Memory Usage Alert Type: STATIC This alert is triggered when Memory usage is above 90%. Apdex Score Alert Type: STATIC This alert is triggered when the Apdex score is below 0.5 for 5 minutes. Transaction Errors Alert Type: STATIC This alert is triggered when the transactions fail more than 10% of the time in 5 minutes. HttpRb observability quickstart contains 1 data source . This is how you'll get your data into New Relic. HttpRb installation docs Straightforward Ruby-based HTTP client with advanced feature set. Docs", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 84.074814, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "apm", + "body": "! Leverage community expertise and instantly get value out of your telemetry data. 
This quickstart automatically instruments HttpRb with the New Relic Ruby agent, and allows you to further leverage New Relic's APM capabilities by setting up custom dashboards , errors inbox , transaction tracing" + }, + "id": "6156684728ccbc1617f21430" } ], "/padrino/7c11a5e8-d9fd-4dbe-9846-1181dfebbe11": [ @@ -86593,7 +86512,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86634,7 +86553,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86676,7 +86595,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86718,7 +86637,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86760,7 +86679,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86804,7 +86723,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86845,7 +86764,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86887,7 +86806,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86929,7 +86848,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -86971,7 +86890,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -87010,7 +86929,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 107.73782, + "_score": 107.34386, "_version": null, "_explanation": null, "sort": null, @@ -87052,7 +86971,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 96.852036, + "_score": 95.37338, "_version": null, "_explanation": null, "sort": null, @@ -87094,7 +87013,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 80.62351, + "_score": 80.845825, "_version": null, "_explanation": null, "sort": null, @@ -87131,7 +87050,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 77.04792, + "_score": 76.89376, "_version": null, "_explanation": null, "sort": null, @@ -87176,7 +87095,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 73.81227, + "_score": 73.86043, "_version": null, "_explanation": null, "sort": null, @@ -87220,7 +87139,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": 
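As a rough sketch of the NRQL signal behind a static Apdex condition like the one in the quickstart above (the t: 0.5 response-time target and the 'My HttpRb App' application name are illustrative assumptions, not part of the quickstart; the alert's 0.5 threshold applies to the resulting score):

SELECT apdex(duration, t: 0.5) FROM Transaction WHERE appName = 'My HttpRb App'

A static condition would then open a violation when this Apdex score stays below 0.5 for 5 minutes, matching the threshold described above.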
"520d1d5d14cc8a32e600034c", - "_score": 64.17504, + "_score": 60.015556, "_version": null, "_explanation": null, "sort": null, @@ -87272,7 +87191,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.881638, + "_score": 53.63669, "_version": null, "_explanation": null, "sort": null, @@ -87339,7 +87258,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 57.20974, + "_score": 53.539505, "_version": null, "_explanation": null, "sort": null, @@ -87371,7 +87290,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.903347, + "_score": 51.504333, "_version": null, "_explanation": null, "sort": null, @@ -87404,7 +87323,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.864708, + "_score": 51.473335, "_version": null, "_explanation": null, "sort": null, @@ -87443,7 +87362,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 203.00626, + "_score": 189.37022, "_version": null, "_explanation": null, "sort": null, @@ -87499,7 +87418,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 177.87093, + "_score": 165.793, "_version": null, "_explanation": null, "sort": null, @@ -87563,7 +87482,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 146.4, + "_score": 136.64267, "_version": null, "_explanation": null, "sort": null, @@ -87599,7 +87518,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 133.54099, + "_score": 126.08999, "_version": null, "_explanation": null, "sort": null, @@ -87640,7 +87559,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.887024, + "_score": 114.99555, "_version": null, "_explanation": null, "sort": null, @@ -87678,7 +87597,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -87743,7 +87662,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.09767, + "_score": 111.34474, "_version": null, "_explanation": null, "sort": null, @@ -87772,7 +87691,7 @@ "external_id": "1f13326e09d6da78f08f645bc069c22342fbac6c", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/install-infrastructure-agent/config-management-tools/configure-infrastructure-agent-using-ansible/", - "published_at": "2022-02-14T09:31:37Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-04T11:22:48Z", "document_type": "page", "popularity": 1, @@ -87780,7 +87699,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.761826, + "_score": 107.75279, "_version": null, "_explanation": null, "sort": null, @@ -87819,7 +87738,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 106.56522, + "_score": 99.14227, "_version": null, "_explanation": null, "sort": null, @@ -87831,44 +87750,38 @@ }, { "sections": [ - "Amazon RDS Enhanced Monitoring integration", - "Important", - "Features", - "Enable enhanced monitoring", - "Stream logs to Lambda function", - "Configuration and polling", - "Find and use data", - "Metric data", - "Metric data 
for all DB engines (except MS SQL Server) [#DB metrics]", - "Metric data for MS SQL", - "Definitions" + "OS versions page", + "Viewing the OS versions page", + "Viewing drill-down details" ], - "title": "Amazon RDS Enhanced Monitoring integration", + "title": "OS versions page", "type": "docs", "tags": [ - "AWS integrations list", - "Amazon integrations", - "Integrations" + "Mobile app pages", + "Mobile monitoring UI", + "Mobile monitoring" ], - "external_id": "b8fc293ef2181c19a5e816449b9a320e44e13ab3", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-rds-enhanced-monitoring-integration/", - "published_at": "2022-02-15T20:48:24Z", - "updated_at": "2022-02-15T20:48:24Z", + "external_id": "370b6f1584d001a17f414066097692b9189e1a50", + "image": "https://docs.newrelic.com/static/8d84abf966c2f4b75ca298b362995c0e/c1b63/os-version-pic_0.png", + "url": "https://docs.newrelic.com/docs/mobile-monitoring/mobile-monitoring-ui/mobile-app-pages/os-versions-page/", + "published_at": "2022-02-14T10:34:40Z", + "updated_at": "2021-07-09T11:46:41Z", "document_type": "page", "popularity": 1, - "body": "Important Enable the AWS CloudWatch Metric Streams integration to monitor all CloudWatch metrics from your AWS services, including custom namespaces. Individual integrations are no longer our recommended option. New Relic infrastructure integrations include an integration for collecting enhanced Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor the DB processes and identify potential trouble spots as well as to profile the DB allowing you to improve and optimize their response and cost. Enable enhanced monitoring Important Enabling this integration will incur some additional charges to your Amazon CloudWatch account. In addition there are some limitations and CPU metric data collection differences that are explained in Amazon's enhanced monitoring documentation. You must first have the New Relic Amazon RDS monitoring integration enabled before enabling RDS Enhanced Monitoring. Be sure that you have completed the steps in Connect AWS services to New Relic. New Relic uses AWS Lambda in order to collect RDS logs and provide near-real time data of your RDS instances, this capability is called RDS Enhanced Monitoring in AWS. Follow these steps in order to enable RDS Enhanced Monitoring integration: Specify the RDS instances that need Enable RDS Enhanced Monitoring. You can specify this when creating or modifying the instance: under Monitoring, set Enable Enhanced Monitoring to Yes. We recommend setting the data Granularity to 15 seconds. After enhanced monitoring is enabled, a stream called RDSOSMetrics is created in AWS CloudWatch Logs. Enhanced monitoring metrics are available via this stream. Create a lambda function and subscribe it to that stream in the following steps to obtain the data. Create a new AWS Lambda function from the Serverless Repository: Go to Lambda > Create Function > Browse serverless App repository, check the box for Show apps that create custom IAM roles or resource policies, and search for NewRelic-log-ingestion. Populate the LICENSE_KEY environment variable with your New Relic account license key. 
Select Deploy to create a new CloudFormation stack, a new function called newrelic-log-ingestion, and the required role. Make sure that the NewRelic-log-ingestion function execution role has attached the arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess policy, giving it the appropriate permissions to read CloudWatch Logs. Go to the newrelic-log-ingestion function. Continue with the procedure to stream logs to the Lambda function. Stream logs to Lambda function To link the RDSOSMetrics log stream to the Lambda function (JSON format): From AWS Console > CloudWatch > Logs, select RDSOSMetrics log group, and apply Actions > Create Lambda subscription filter. For the Lambda function, select newrelic-log-ingestion. From the Log Format dropdown, select JSON as the Log format. At the bottom, click the Start streaming button to save your Lambda subscription filter. Once completed, the Lambda function will send all the log lines from RDSOSMetrics to New Relic's ingest services. Configuration and polling You can change the polling frequency and filter data using configuration options. Default polling information for the Amazon RDS Enhanced Monitoring integration: New Relic polling interval: 30 seconds on average (collected via CloudWatch Logs) Configurable when setting up AWS Lambda Amazon CloudWatch data interval: 1 minute Find and use data To find your integration data, go to one.newrelic.com > Infrastructure > AWS and select the RDS > Enhanced monitoring dashboard link. You can query and explore your data using the DatastoreSample event type, with a provider value of RdsDbInstance . For more on how to use your data, see Understand and use integration data. Metric data New Relic collects the following enhanced RDS data: Metric data for all DB engines (except MS SQL Server) [#DB metrics] Group Metrics Description General engine The database engine for the DB instance. instanceId The DB instance identifier. instanceResourceId A region-unique, immutable identifier for the DB instance, also used as the log stream identifier. numVCpus The number of virtual CPUs for the DB instance. timestamp The time at which the metrics were taken. uptime The amount of time that the DB instance has been active. version The version of the OS metrics' stream JSON format. cpuUtilization guest The percentage of CPU in use by guest programs. idle The percentage of CPU that is idle. irq The percentage of CPU in use by software interrupts. nice The percentage of CPU in use by programs running at lowest priority. steal The percentage of CPU in use by other virtual machines. system The percentage of CPU in use by the kernel. total The total percentage of the CPU in use. This value excludes the nice value. user The percentage of CPU in use by user programs. wait The percentage of CPU unused while waiting for I/O access. diskIO (not available for Amazon Aurora) avgQueueLen The number of requests waiting in the I/O device's queue. avgReqSz The average request size, in kilobytes. await The number of milliseconds required to respond to requests, including queue time and service time. device The identifier of the disk device in use. readIOsPS The number of read operations per second. readKb The total number of kilobytes read. readKbPS The number of kilobytes read per second. rrqmPS The number of merged read requests queued per second. tps The number of I/O transactions per second. util The percentage of CPU time during which requests were issued. writeIOsPS The number of write operations per second. 
writeKb The total number of kilobytes written. writeKbPS The number of kilobytes written per second. wrqmPS The number of merged write requests queued per second. fileSys maxFiles The maximum number of files that can be created for the file system. total The total number of disk space available for the file system, in kilobytes. used The amount of disk space used by files in the file system, in kilobytes. usedFilePercent The percentage of available files in use. usedFiles The number of files in the file system. usedPercent The percentage of the file-system disk space in use. loadAverageMinute fifteen The number of processes requesting CPU time over the last 15 minutes. five The number of processes requesting CPU time over the last 5 minutes. one The number of processes requesting CPU time over the last minute. memory active The amount of assigned memory, in kilobytes. buffers The amount of memory used for buffering I/O requests prior to writing to the storage device, in kilobytes. cached The amount of memory used for caching file system–based I/O. dirty The amount of memory pages in RAM that have been modified but not written to their related data block in storage, in kilobytes. free The amount of unassigned memory, in kilobytes. hugePagesFree The number of free huge pages. Huge pages are a feature of the Linux kernel. hugePagesRsvd The number of committed huge pages. hugePagesSize The size for each huge pages unit, in kilobytes. hugePagesSurp The number of available surplus huge pages over the total. hugePagesTotal The total number of huge pages for the system. inactive The amount of least-frequently used memory pages, in kilobytes. mapped The total amount of file-system contents that is memory mapped inside a process address space, in kilobytes. pageTables The amount of memory used by page tables, in kilobytes. slab The amount of reusable kernel data structures, in kilobytes. total The total amount of memory, in kilobytes. writeback The amount ofn kilobytes. network rx The number of bytes received per second. tx The number of bytes uploaded per second. process cpuUsedPc The percentage of CPU used by the process. rss The amount of RAM allocated to the process, in kilobytes. memoryUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. swap cached The amount of swap memory, in kilobytes, used as cache memory. free The total amount of swap memory free, in kilobytes. total The total amount of swap memory available, in kilobytes. tasks blocked The number of tasks that are blocked. running The number of tasks that are running. sleeping The number of tasks that are sleeping. stopped The number of tasks that are stopped. total The total number of tasks. zombie The number of child tasks that are inactive with an active parent task. Metric data for MS SQL Group Metrics Description disks totalKb The total space of the disk, in kilobytes. usedKb The amount of space used on the disk, in kilobytes. usedPc The percentage of space used on the disk. availKb The space available on the disk, in kilobytes. availPc The percentage of space available on the disk. rdCountPS The number of read operations per second rdBytesPS The number of bytes read per second. wrCountPS The number of write operations per second. wBytesPS The amount of bytes written per second. memory commitToKb The amount of pagefile-backed virtual address space in use, that is, the current commit charge. This value is composed of main memory (RAM) and disk (pagefiles). 
commitLimitKb The maximum possible value for the commitTotKb metric. This value is the sum of the current pagefile size plus the physical memory available for pageable contents–excluding RAM that is assigned to non-pageable areas. commitPeakKb The largest value of the commitTotKb metric since the operating system was last started. kernTotKb The sum of the memory in the paged and non-paged kernel pools, in kilobytes. kernPagedKb The amount of memory in the paged kernel pool, in kilobytes. kernNonpagedKb The amount of memory in the non-paged kernel pool, in kilobytes. pageSize The size of a page, in bytes. physTotKb The amount of physical memory, in kilobytes. physAvailKb The amount of available physical memory, in kilobytes. sqlServerTotKb The amount of memory committed to Microsoft SQL Server, in kilobytes. sysCacheKb The amount of system cache memory, in kilobytes. network rdBytesPS The number of bytes received per second. wrBytesPS The number of bytes sent per second. process cpuUsedPc The percentage of CPU used by the process. memUsedPc The amount of memory used by the process, in kilobytes. processName The name of the process. workingSetKb The amount of memory in the private working set plus the amount of memory that is in use by the process and can be shared with other processes, in kilobytes. workingSetPrivKb The amount of memory that is in use by a process, but can't be shared with other processes, in kilobytes. workingSetShareableKb The amount of memory that is in use by a process and can be shared with other processes, in kilobytes. virtKb The amount of virtual address space the process is using, in kilobytes. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. system handles The number of handles that the system is using. processes The number of processes running on the system. threads The number of threads running on the system. Definitions Term Description Event type DataStoreSample Provider RdsDbInstance Processes Enhanced Monitoring allows you to monitor the following processes associated with your RDS instances. : RDS Process: Shows a summary of the resources used by the RDS management agent, diagnostics monitoring processes, and other AWS processes that are required to support RDS DB instances. RDS Child Process: Nested under RDS Processes, shows a summary of the RDS processes that support the DB instance, for example aurora for Amazon Aurora DB clusters and mysqld for MySQL DB instances. OS Processes: Shows a summary of the kernel and system processes, which generally have minimal impact on performance.", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill down into details by a major or minor OS version (for example, iOS 8, iOS 7.1.1, Android 4.2.2). Viewing the OS versions page one.newrelic.com > Mobile > (select an app) > App > OS versions: Use this page to view, sort, or drill down into detailed information about the top five types of operating system versions using your mobile app. To view performance details about the operating system versions for your mobile app users: Go to one.newrelic.com > Mobile > (select an app) > App > OS versions. To select the mobile app versions or time period, use the Versions menu and time picker below the UI menu bar. 
Optional: Select the Sort by and Hide < 1% throughput options. To expand or collapse the list of operating systems to include versions, select the operating system's name (for example, iOS 7). Viewing drill-down details To drill down into detailed information, use any of our standard user interface functions and page functions. In addition: To view details for the minor and point releases of a major OS version (including interaction time, HTTP request times, network failures, active devices, and slowest traces or all subversions), select a major OS version from the list. To view details for a specific OS version, select its name from the expanded OS list. To view trace details for a slow transaction (if available), select its link. For more information, see Interactions page. To return to the main OS versions page, select the Close (X) button.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.1358, + "_score": 95.48022, "_version": null, "_explanation": null, "sort": null, "highlight": { - "body": " Amazon RDS data; this supplements the basic New Relic RDS integration with real-time metrics about the operating system the database instance runs on. Features This New Relic integration allows you to monitor and alert on RDS Enhanced Monitoring. You can use integration data and alerts to monitor" + "title": "OS versions page", + "sections": "OS versions page", + "body": "The OS versions page for mobile monitoring provides performance details about the top operating system versions hosting your mobile application, such as iOS and Android. Charts compare the OS versions by: HTTP request time Network failures Requests per minute Active devices From here you can drill" }, - "id": "617d6d5d64441fb952fbcb5d" + "id": "603eaee9e7b9d260112a0809" } ], "/webrick/6c327009-b269-49c1-b3a3-392f86ece8fe": [ 
"_explanation": null, "sort": null, @@ -88265,7 +88178,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 133.54108, + "_score": 126.08999, "_version": null, "_explanation": null, "sort": null, @@ -88306,7 +88219,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 114.887024, + "_score": 114.99555, "_version": null, "_explanation": null, "sort": null, @@ -88351,7 +88264,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 49.549965, + "_score": 48.569668, "_version": null, "_explanation": null, "sort": null, @@ -88383,7 +88296,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 40.134586, + "_score": 40.040745, "_version": null, "_explanation": null, "sort": null, @@ -88428,7 +88341,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 33.12008, + "_score": 32.837097, "_version": null, "_explanation": null, "sort": null, @@ -88466,7 +88379,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 319.6813, + "_score": 296.8805, "_version": null, "_explanation": null, "sort": null, @@ -88571,7 +88484,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 314.45465, + "_score": 292.5229, "_version": null, "_explanation": null, "sort": null, @@ -88600,7 +88513,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 284.9087, + "_score": 269.35345, "_version": null, "_explanation": null, "sort": null, @@ -88645,7 +88558,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 269.35065, + "_score": 254.21501, "_version": null, "_explanation": null, "sort": null, @@ -88690,7 +88603,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 262.73715, + "_score": 247.03351, "_version": null, "_explanation": null, "sort": null, @@ -88727,7 +88640,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -88766,7 +88679,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 170.38239, + "_score": 158.75671, "_version": null, "_explanation": null, "sort": null, @@ -88833,7 +88746,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 143.9213, + "_score": 135.72348, "_version": null, "_explanation": null, "sort": null, @@ -88877,7 +88790,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 140.00446, + "_score": 132.38713, "_version": null, "_explanation": null, "sort": null, @@ -88919,7 +88832,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 132.92007, + "_score": 125.41164, "_version": null, "_explanation": null, "sort": null, @@ -88964,7 +88877,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.33618, + "_score": 81.56581, "_version": null, "_explanation": null, "sort": null, @@ -89006,7 +88919,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.276306, + "_score": 
81.50674, "_version": null, "_explanation": null, "sort": null, @@ -89048,7 +88961,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -89090,7 +89003,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -89132,7 +89045,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 81.27611, + "_score": 81.50654, "_version": null, "_explanation": null, "sort": null, @@ -89171,7 +89084,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 194.57608, + "_score": 193.384, "_version": null, "_explanation": null, "sort": null, @@ -89210,7 +89123,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 192.19806, + "_score": 179.11475, "_version": null, "_explanation": null, "sort": null, @@ -89221,6 +89134,50 @@ }, "id": "617d72cf64441f775afbd935" }, + { + "sections": [ + "Azure App Service monitoring integration", + "Features", + "Activate integration", + "Configuration and polling", + "Find and use data", + "Metric data", + "WebApp Metrics", + "Service Plan Metrics", + "Inventory data", + "EOL NOTICE", + "azure/appservice/webapp/", + "azure/appservice/host-name", + "azure/appservice/service-plan" + ], + "title": "Azure App Service monitoring integration", + "type": "docs", + "tags": [ + "Azure integrations list", + "Microsoft Azure integrations", + "Integrations" + ], + "external_id": "8d631a91f45c63f6f311b9586c5a93b0e8964ee2", + "image": "", + "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-app-service-monitoring-integration/", + "published_at": "2022-02-14T12:03:53Z", + "updated_at": "2022-02-14T12:03:53Z", + "document_type": "page", + "popularity": 1, + "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure App Service that reports data from Azure App Service to the New Relic platform. This document explains how to activate this integration and describes the data that can be captured. Features An Azure App Service is a service for hosting and running web applications, REST APIs, and mobile back ends in a fully managed platform. New Relic's integration for Azure App Service reports metric data about your Web Apps, including: Number of requests received Number of HTTP responses and errors It also collects data about the service's status and configuration. Using New Relic, you can: View Azure App Service data in pre-built infrastructure dashboards. Run custom queries and visualize the data in New Relic One. Create alert conditions to notify you of changes in App Service data. Activate integration To enable the integration, follow standard procedures to activate your Azure service in New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. New Relic queries your Azure App Services according to a default polling interval, which varies depending on the integration. Polling frequency for the Azure App Services integration: Polling interval: 5 minutes Resolution: 1 data point per minute Find and use data To explore your integration data, go to one.newrelic.com > Infrastructure > Azure. 
The event types are: AzureAppServiceWebAppSample: Represents each of the Azure App Service Web Apps in the account. Inventory data has a provider value of AzureAppServiceWebApp. AzureAppServiceHostNameSample: Represents each of the domain names bound to a Web App that can be used for accessing it. Inventory data has a provider value of AzureAppServiceHostName. AzureAppServicePlan: Represents a set of compute resources for a web app to run on. Metric data To view metrics reported by the Azure App Service integration, query the AzureAppServiceWebAppSample or AzureAppServicePlanSample event types. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. WebApp Metrics Metric Description Metadata appConnections Average number of connections. instance averageMemoryWorkingSetBytes Average memory working set, in bytes. instance averageResponseTimeSeconds Average response time, in seconds. instance cpuTimeSeconds Total CPU time, in seconds. instance currentAssemblies Average number of current assemblies. instance handles Average number of resources handled. instance gen0Collections Total number of Gen 0 garbage collections. instance gen1Collections Total number of Gen 1 garbage collections. instance gen2Collections Total number of Gen 2 garbage collections. instance http101 Total number of HTTP responses 101. instance http2xx Total number of HTTP responses 200-299. instance http3xx Total number of HTTP responses 300-399. instance http401 Total number of HTTP responses 401. instance http403 Total number of HTTP responses 403. instance http404 Total number of HTTP responses 404. instance http406 Total number of HTTP responses 406. instance http4xx Total number of HTTP responses 400-499. instance http5xx Total number of HTTP server errors 500-599. instance ioOtherBytesPerSecond Total number of IO Other bytes per second. instance ioOtherOperationsPerSecond Total number of IO Other operations per second. instance ioReadBytesPerSecond Total number of IO Read bytes per second. instance ioReadOperationsPerSecond Total number of IO Read operations per second. instance ioWriteBytesPerSecond Total number of IO Write bytes per second. instance ioWriteOperationsPerSecond Total number of IO Write operations per second. instance memoryWorkingSetBytes Total bytes of memory working set. instance privateBytes Average number of private bytes. instance requests Total number of requests to the app. instance requestsInApplicationQueue Average number of requests in application queue. instance receivedBytes Total number of bytes received by the app. instance sentBytes Total number of bytes sent by the app. instance threads Average number of threads. instance totalAppDomains Total App domains. instance totalAppDomainsUnloaded Total App domains unloaded. instance Service Plan Metrics Metric Description Metadata bytesReceivedBytes Amount of data received by the application, in bytes. instance bytesSentBytes Amount of data sent by the application, in bytes. instance cpuPercent CPU usage. instance diskQueueLength Length of the disk queue. instance httpQueueLength Length of the HTTP queue. instance memoryPercent Memory usage. instance Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. 
The Azure App Service integration reports the following inventory data about your system's state and configuration. azure/appservice/webapp/ alwaysOn availabilityState clientAffinityEnabled clientCertEnabled containerSize defaultDocuments detailedErrorLoggingEnabled enabled hostNamesDisabled httpLoggingEnabled isPremiumApp linuxFxVersion loadBalancing logsDirectorySizeLimit managedPipelineMode netFrameworkVersion numberOfWorkers operatingSystem phpVersion platformArchitecture regionName remoteDebuggingEnabled resourceGroupName scmSiteAlsoStopped scmType state type usageState vnetName azure/appservice/host-name dnsRecordType hostNameType hostType webAppName name providerAccountId providerAccountName sslState virtualIp azure/appservice/service-plan capacity maxInstances numberOfWebApps OperatingSystem pricingTier regionName resourceGroupName type", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 147.29422, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "tags": "Microsoft Azure integrations", + "body": " operatingSystem phpVersion platformArchitecture regionName remoteDebuggingEnabled resourceGroupName scmSiteAlsoStopped scmType state type usageState vnetName azure/appservice/host-name dnsRecordType hostNameType hostType webAppName name providerAccountId providerAccountName sslState virtualIp azure/appservice/service-plan capacity maxInstances numberOfWebApps OperatingSystem pricingTier regionName resourceGroupName type" + }, + "id": "617da8f464441ff530fbee8f" + }, { "sections": [ "MySQL monitoring integration", @@ -89264,7 +89221,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 156.03725, + "_score": 147.12564, "_version": null, "_explanation": null, "sort": null, @@ -89274,50 +89231,6 @@ }, "id": "61ac554e28ccbc3ccdc24744" }, - { - "sections": [ - "Azure App Service monitoring integration", - "Features", - "Activate integration", - "Configuration and polling", - "Find and use data", - "Metric data", - "WebApp Metrics", - "Service Plan Metrics", - "Inventory data", - "EOL NOTICE", - "azure/appservice/webapp/", - "azure/appservice/host-name", - "azure/appservice/service-plan" - ], - "title": "Azure App Service monitoring integration", - "type": "docs", - "tags": [ - "Azure integrations list", - "Microsoft Azure integrations", - "Integrations" - ], - "external_id": "8d631a91f45c63f6f311b9586c5a93b0e8964ee2", - "image": "", - "url": "https://docs.newrelic.com/docs/infrastructure/microsoft-azure-integrations/azure-integrations-list/azure-app-service-monitoring-integration/", - "published_at": "2022-02-14T12:03:53Z", - "updated_at": "2022-02-14T12:03:53Z", - "document_type": "page", - "popularity": 1, - "body": "New Relic infrastructure monitoring provides an integration for Microsoft Azure App Service that reports data from Azure Apps Service to the New Relic platform. This document explains how to activate this integration and describes the data that can be captured. Features An Azure App Service is a service for hosting and running web applications, REST APIs, and mobile back ends in a fully managed platform. New Relic's integration for Azure App Service reports metric data about your Web Apps, including: Number of requests received Number of HTTP responses and errors It also collects data about the service's status and configuration. Using New Relic, you can: View Azure Storage data in pre-built infrastructure dashboards. 
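Given the AzureAppServicePlanSample event type and the cpuPercent and memoryPercent metrics listed in the entry above, a minimal NRQL sketch for exploring this data might look like the following (the one-hour window is illustrative):

SELECT average(cpuPercent), average(memoryPercent) FROM AzureAppServicePlanSample TIMESERIES SINCE 1 hour ago

Faceting or filtering on the instance metadata noted in the metric tables narrows the chart to a single App Service plan instance.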
Run custom queries and visualize the data in New Relic One. Create alert conditions to notify you of changes in Storage data. Activate integration To enable the integration follow standard procedures to activate your Azure service in New Relic. Configuration and polling You can change the polling frequency and filter data using configuration options. New Relic query your Azure App Services according to a default polling interval, which varies depending on the integration. Polling frequency for the Azure App Services integration: Polling interval: 5 minutes Resolution: 1 data point per minute Find and use data To explore your integration data, go to one.newrelic.com > Infrastructure > Azure. The event types are: AzureAppServiceWebAppSample: Represents each of the Azure App Service Web Apps in the account. Inventory data has a provider value of AzureAppServiceWebApp. AzureAppServiceHostNameSample: Represents each of the domain names bound to a Web App that can be used for accessing it. Inventory data has a provider value of AzureAppServiceHostName. AzureAppServicePlan: Represents a set of compute resources for a web app to run on. Metric data To view metrics reported by the Azure App Service integration, query the AzureAppServiceWebAppSample or AzureAppServicePlanSample event types. Use the metadata associated with each metric to filter and facet the data being reported. For detailed metric information, see the Azure supported metrics documentation. WebApp Metrics Metric Description Metadata appConnections Average number of connections. instance averageMemoryWorkingSetBytes Average memory working set, in bytes. instance averageResponseTimeSeconds Average response time, in seconds. instance cpuTimeSeconds Total CPU time, in seconds. instance currentAssemblies Average number of current assemblies. instance handles Average number of resources handled. instance gen0Collections Total number of Gen 0 garbage collections. instance gen1Collections Total number of Gen 1 garbage collections. instance gen2Collections Total number of Gen 2 garbage collections. instance http101 Total number of HTTP responses 101. instance http2xx Total number of HTTP responses 200-299. instance http3xx Total number of HTTP responses 300-399. instance http401 Total number of HTTP responses 401. instance http403 Total number of HTTP responses 403. instance http404 Total number of HTTP responses 404. instance http406 Total number of HTTP responses 406. instance http4xx Total number of HTTP responses 400-499. instance http5xx Total number of HTTP server errors 500-599. instance ioOtherBytesPerSecond Total number of IO Other bytes per second. instance ioOtherOperationsPerSecond Total number of IO Other operations per second. instance ioReadBytesPerSecond Total number of IO Read bytes per second. instance ioReadOperationsPerSecond Total number of IO Other operations bytes per second. instance ioWriteBytesPerSecond Total number of IO Write bytes per second. instance ioWriteOperationsPerSecond Total number of IO Other operations bytes per second. instance memoryWorkingSetBytes Total bytes of memory working set. instance privateBytes Average number of private bytes. instance requests Total number of requests to the app. instance requestsInApplicationQueue Average number of requests in application queue. instance receivedBytes Total number of bytes received by the app. instance sentBytes Total number of bytes sent by the app. instance threads Average number of threads. instance totalAppDomains Total App domains. 
instance totalAppDomainsUnloaded Total App domains unloaded. instance Service Plan Metrics Metric Description Metadata bytesReceivedBytes Amount of data received by the application, in bytes. instance bytesSentBytes Amount of data sent by the application, in bytes. instance cpuPercent CPU usage. instance diskQueueLength Length of the disk queue. instance httpQueueLength Length of the HTTP queue. instance memoryPercent Memory usage. instance Inventory data EOL NOTICE After March 2022, we're discontinuing support for several capabilities, including inventory data for cloud integrations. For more details, including how you can easily prepare for this transition, see our Explorers Hub post. The Azure App Service integration reports the following inventory data about your system's state and configuration. azure/appservice/webapp/ alwaysOn availabilityState clientAffinityEnabled clientCertEnabled containerSize defaultDocuments detailedErrorLoggingEnabled enabled hostNamesDisabled httpLoggingEnabled isPremiumApp linuxFxVersion loadBalancing logsDirectorySizeLimit managedPipelineMode netFrameworkVersion numberOfWorkers operatingSystem phpVersion platformArchitecture regionName remoteDebuggingEnabled resourceGroupName scmSiteAlsoStopped scmType state type usageState vnetName azure/appservice/host-name dnsRecordType hostNameType hostType webAppName name providerAccountId providerAccountName sslState virtualIp azure/appservice/service-plan capacity maxInstances numberOfWebApps OperatingSystem pricingTier regionName resourceGroupName type", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 155.74414, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "tags": "Microsoft Azure integrations", - "body": " operatingSystem phpVersion platformArchitecture regionName remoteDebuggingEnabled resourceGroupName scmSiteAlsoStopped scmType state type usageState vnetName azure/appservice/host-name dnsRecordType hostNameType hostType webAppName name providerAccountId providerAccountName sslState virtualIp azure/appservice/service-plan capacity maxInstances numberOfWebApps OperatingSystem pricingTier regionName resourceGroupName type" - }, - "id": "617da8f464441ff530fbee8f" - }, { "sections": [ "Infrastructure agent overhead", @@ -89346,7 +89259,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 139.74098, + "_score": 139.43498, "_version": null, "_explanation": null, "sort": null, @@ -89406,7 +89319,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 118.18478, + "_score": 117.60916, "_version": null, "_explanation": null, "sort": null, @@ -89455,7 +89368,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 103.4545, + "_score": 103.04234, "_version": null, "_explanation": null, "sort": null, @@ -89492,7 +89405,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.45323, + "_score": 78.32762, "_version": null, "_explanation": null, "sort": null, @@ -89538,7 +89451,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.22528, + "_score": 78.15056, "_version": null, "_explanation": null, "sort": null, @@ -89579,7 +89492,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 84.22144, + "_score": 78.14758, "_version": null,
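As a hedged illustration of the "Metric data" guidance in the Azure App Service body above: the WebApp metrics are queried through the AzureAppServiceWebAppSample event type. The sketch below simply prints an NRQL query built from the metric names in the table; the aggregation and time window are assumptions, so treat it as a starting point, not a verified query.

```bash
# Illustrative sketch only: prints an NRQL query using metric names documented
# above (requests, http4xx, http5xx). Paste the output into the New Relic
# query builder; the SINCE window is an arbitrary assumption.
cat <<'NRQL'
SELECT sum(requests), sum(http4xx), sum(http5xx)
FROM AzureAppServiceWebAppSample
TIMESERIES SINCE 1 hour ago
NRQL
```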
"_explanation": null, "sort": null, @@ -89625,7 +89538,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -89666,7 +89579,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -89708,7 +89621,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -89750,7 +89663,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -89792,7 +89705,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 83.88998, + "_score": 84.077385, "_version": null, "_explanation": null, "sort": null, @@ -89839,7 +89752,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.65376, + "_score": 84.26276, "_version": null, "_explanation": null, "sort": null, @@ -89890,7 +89803,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 88.65103, + "_score": 84.26053, "_version": null, "_explanation": null, "sort": null, @@ -89948,7 +89861,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 72.542694, + "_score": 71.479034, "_version": null, "_explanation": null, "sort": null, @@ -90002,7 +89915,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 72.055984, + "_score": 68.43709, "_version": null, "_explanation": null, "sort": null, @@ -90043,7 +89956,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 71.78946, + "_score": 68.21969, "_version": null, "_explanation": null, "sort": null, @@ -90094,7 +90007,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 119.52528, + "_score": 113.57048, "_version": null, "_explanation": null, "sort": null, @@ -90106,6 +90019,43 @@ }, "id": "61b9389528ccbcb4d396ee5e" }, + { + "sections": [ + "WordPress specific functionality", + "Configuration", + "Metrics", + "Integration with New Relic partners" + ], + "title": "WordPress specific functionality", + "type": "docs", + "tags": [ + "Frameworks and libraries", + "PHP agent", + "Agents" + ], + "external_id": "89c1d591a4f86ffbf1f7e5400d09d555129691f2", + "image": "", + "url": "https://docs.newrelic.com/docs/apm/agents/php-agent/frameworks-libraries/wordpress-specific-functionality/", + "published_at": "2022-02-14T15:05:08Z", + "updated_at": "2021-12-10T01:03:55Z", + "document_type": "page", + "popularity": 1, + "body": "If you install New Relic for WordPress websites, the PHP agent receives additional metrics. A WordPress page appears in the New Relic user interface: Go to one.newrelic.com > APM > (select a WordPress app). Configuration You can control which WordPress-specific metrics your app sends to New Relic by using the PHP agent's ini setting newrelic.framework.wordpress.hooks. 
This is enabled by default in versions 5.3 or higher, and it can be disabled with: newrelic.framework.wordpress.hooks = false Copy Although this setting uses the word .hooks, it controls all WordPress metrics sent to New Relic, including hooks, plugins, and themes. Metrics The PHP agent receives metrics for the following: Hooks: These metrics indicate time spent within each WordPress hook. Time is calculated from the hook dispatch functions apply_filters, apply_filters_ref_array, do_action, and do_action_ref_array. Plugins and themes: These metrics indicate time spent within each WordPress plugin and theme. Time is calculated from the dispatch functions for hooks. Integration with New Relic partners By integrating your WordPress application with APM, you can view performance directly from your WordPress app's Administration page. For example, W3 Total Cache is one of New Relic's partners that specializes in WordPress apps.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 91.88058, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "WordPress specific functionality", + "sections": "WordPress specific functionality", + "tags": "PHP agent", + "body": "If you install New Relic for WordPress websites, the PHP agent receives additional metrics. A WordPress page appears in the New Relic user interface: Go to one.newrelic.com > APM > (select a WordPress app). Configuration You can control which WordPress-specific metrics your app sends to New Relic" + }, + "id": "617e9cf4e7b9d25f7dc05b91" + }, { "sections": [ "Advanced configuration for network performance monitoring", @@ -90148,7 +90098,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.77371, + "_score": 90.99011, "_version": null, "_explanation": null, "sort": null, @@ -90160,43 +90110,6 @@ }, "id": "61b9389664441f8fc3d7182e" }, - { - "sections": [ - "WordPress specific functionality", - "Configuration", - "Metrics", - "Integration with New Relic partners" - ], - "title": "WordPress specific functionality", - "type": "docs", - "tags": [ - "Frameworks and libraries", - "PHP agent", - "Agents" - ], - "external_id": "89c1d591a4f86ffbf1f7e5400d09d555129691f2", - "image": "", - "url": "https://docs.newrelic.com/docs/apm/agents/php-agent/frameworks-libraries/wordpress-specific-functionality/", - "published_at": "2022-02-14T15:05:08Z", - "updated_at": "2021-12-10T01:03:55Z", - "document_type": "page", - "popularity": 1, - "body": "If you install New Relic for WordPress websites, the PHP agent receives additional metrics. A WordPress page appears in the New Relic user interface: Go to one.newrelic.com > APM > (select a WordPress app). Configuration You can control which WordPress-specific metrics your app sends to New Relic by using the PHP agent's ini setting newrelic.framework.wordpress.hooks. This is enabled by default in versions 5.3 or higher, and it can be disabled with: newrelic.framework.wordpress.hooks = false Copy Although this setting uses the word .hooks, it controls all WordPress metrics sent to New Relic, including hooks, plugins, and themes. Metrics The PHP agent receives metrics for the following: Hooks: These metrics indicate time spent within each WordPress hook. Time is calculated from the hook dispatch functions apply_filters, apply_filters_ref_array, do_action, and do_action_ref_array. Plugins and themes: These metrics indicate time spent within each WordPress plugin and theme. 
Time is calculated from the dispatch functions for hooks. Integration with New Relic partners By integrating your WordPress application with APM, you can view performance directly from your WordPress app's Administration page. For example, W3 Total Cache is one of New Relic's partners that specializes in WordPress apps.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 91.94968, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "title": "WordPress specific functionality", - "sections": "WordPress specific functionality", - "tags": "PHP agent", - "body": "If you install New Relic for WordPress websites, the PHP agent receives additional metrics. A WordPress page appears in the New Relic user interface: Go to one.newrelic.com > APM > (select a WordPress app). Configuration You can control which WordPress-specific metrics your app sends to New Relic" - }, - "id": "617e9cf4e7b9d25f7dc05b91" - }, { "sections": [ "Browser monitoring and the PHP agent", @@ -90227,7 +90140,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 85.1476, + "_score": 85.176956, "_version": null, "_explanation": null, "sort": null, @@ -90264,7 +90177,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 82.59464, + "_score": 82.62113, "_version": null, "_explanation": null, "sort": null, @@ -90311,7 +90224,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 49.545578, + "_score": 48.534103, "_version": null, "_explanation": null, "sort": null, @@ -90343,7 +90256,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 40.142944, + "_score": 40.02409, "_version": null, "_explanation": null, "sort": null, @@ -90388,7 +90301,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 33.13157, + "_score": 32.82866, "_version": null, "_explanation": null, "sort": null, @@ -90422,7 +90335,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 168.36308, + "_score": 166.79596, "_version": null, "_explanation": null, "sort": null, @@ -90462,7 +90375,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 103.012985, + "_score": 95.44455, "_version": null, "_explanation": null, "sort": null, @@ -90498,7 +90411,7 @@ "external_id": "509f5fb8490b652df4c6f31ec06b403c3393530e", "image": "", "url": "https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/aws-elb-classic-monitoring-integration/", - "published_at": "2022-02-14T11:39:18Z", + "published_at": "2022-02-16T01:42:02Z", "updated_at": "2022-02-14T11:39:18Z", "document_type": "page", "popularity": 1, @@ -90506,7 +90419,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.05328, + "_score": 94.986206, "_version": null, "_explanation": null, "sort": null, @@ -90552,7 +90465,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 100.90199, + "_score": 93.97276, "_version": null, "_explanation": null, "sort": null, @@ -90601,7 +90514,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 97.67578, + "_score": 91.84445, "_version": null, "_explanation": null, "sort": null, @@ 
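To make the WordPress ini setting discussed above concrete, here is a minimal sketch of disabling hook metrics from the shell. Only the newrelic.framework.wordpress.hooks key comes from the doc body; the ini path and the PHP-FPM service name are assumptions that vary by distribution.

```bash
# Minimal sketch, assuming the agent's ini lives at /etc/php.d/newrelic.ini
# and PHP runs under a php-fpm systemd unit; adjust both for your setup.
echo 'newrelic.framework.wordpress.hooks = false' | sudo tee -a /etc/php.d/newrelic.ini
sudo systemctl restart php-fpm  # restart so the PHP agent rereads the ini
```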
-90655,7 +90568,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 285.10104, + "_score": 264.86844, "_version": null, "_explanation": null, "sort": null, @@ -90698,7 +90611,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 240.85832, + "_score": 235.96222, "_version": null, "_explanation": null, "sort": null, @@ -90712,51 +90625,101 @@ }, { "sections": [ - "Link your applications to Kubernetes", + "New Relic Metrics Adapter", + "BETA FEATURE", + "Requirements", + "Installation", "Tip", - "Compatibility and requirements", - "Kubernetes requirements", - "Network requirements", - "APM agent compatibility", - "Openshift requirements", - "Important", - "Configure the injection of metadata", - "Default configuration", - "Custom configuration", - "Manage custom certificates", - "Validate the injection of metadata", - "Disable the injection of metadata", - "Troubleshooting" + "Configuration", + "How it works", + "Caution", + "Troubleshooting", + "Get verbose logs", + "Get raw metrics", + "Metrics not working" ], - "title": "Link your applications to Kubernetes", + "title": "New Relic Metrics Adapter", "type": "docs", "tags": [ "Link apps and services", "Kubernetes integration", "Integrations" ], - "external_id": "0fe0951312aaf683f6614d5956f8c402b9693780", + "external_id": "51fdc0c8df2fdc91fcc51556e323c62e7c12d48a", "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/link-your-applications/link-your-applications-kubernetes/", - "published_at": "2022-02-06T01:24:10Z", - "updated_at": "2022-02-06T01:24:10Z", + "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/newrelic-metrics-adapter/", + "published_at": "2022-02-15T19:18:45Z", + "updated_at": "2022-02-04T12:17:01Z", "document_type": "page", "popularity": 1, - "body": "You can surface Kubernetes metadata and link it to your APM agents as distributed traces to explore performance issues and troubleshoot transaction errors. For more information, see this New Relic blog post. You can quickly start monitoring Kubernetes clusters using our Auto-telemetry with Pixie integration, which doesn't require a language agent. Learn more about Auto-telemetry with Pixie. The metadata injection product uses a MutatingAdmissionWebhook to add the following environment variables to pods: NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME NEW_RELIC_METADATA_KUBERNETES_NODE_NAME NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME NEW_RELIC_METADATA_KUBERNETES_DEPLOYMENT_NAME NEW_RELIC_METADATA_KUBERNETES_POD_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME Copy Tip Our Kubernetes metadata injection project is open source. Here's the code to link APM and infrastructure data and the code to automatically manage certificates. Compatibility and requirements Before linking Kubernetes metadata to your APM agents, make sure you meet the following requirements: Kubernetes requirements Network requirements APM agent compatibility OpenShift requirements Kubernetes requirements To link your applications and Kubernetes, your cluster must have the MutatingAdmissionWebhook controller enabled, which requires Kubernetes 1.9 or higher. 
To verify that your cluster is compatible, run the following command: kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 admissionregistration.k8s.io/v1beta1 Copy If you see a different result, follow the Kubernetes documentation to enable admission control in your cluster. Network requirements For Kubernetes to speak to our MutatingAdmissionWebhook, the master node (or the API server container, depending on how the cluster is set up) should be allowed egress for HTTPS traffic on port 443 to pods in all of the other nodes in the cluster. This might require specific configuration depending on how the infrastructure is set up (on-premises, AWS, Google Cloud, etc). Tip Until Kubernetes v1.14, users were only allowed to register admission webhooks on port 443. Since v1.15 it's possible to register them on different ports. To ensure backward compatibility, the webhook is registered by default on port 443 in the YAML config file we distribute. APM agent compatibility The following New Relic agents collect Kubernetes metadata: Go 2.3.0 or higher Java 4.10.0 or higher Node.js 5.3.0 or higher Python 4.14.0 or higher Ruby 6.1.0 or higher .NET 8.17.438 or higher Openshift requirements To link Openshift and Kubernetes, you must enable mutating admission webhooks, which requires Openshift 3.9 or higher. During the process, install a resource that requires admin permissions to the cluster. Run this to log in as admin: oc login -u system:admin Copy Check that webhooks are correctly configured. If they are not, update the master-config.yaml file. admissionConfig: pluginConfig: MutatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission ValidatingAdmissionWebhook: configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kubeConfigFile: /dev/null kind: WebhookAdmission location: \"\" Copy Important Add kubeConfigFile: /dev/null to address some issues in Openshift. Enable certificate signing by editing the YAML file and updating your configuration: kubernetesMasterConfig: controllerArguments: cluster-signing-cert-file: - \"/etc/origin/master/ca.crt\" cluster-signing-key-file: - \"/etc/origin/master/ca.key\" Copy Restart the Openshift services in the master node. Configure the injection of metadata By default, all the pods you create that include APM agents have the correct environment variables set and the metadata injection applies to the entire cluster. To check that the environment variables have been set, any container that is running must be stopped, and a new instance started (see Validate the injection of metadata). This default configuration also uses the Kubernetes certificates API to automatically manage the certificates required for the injection. If needed, you can limit the injection of metadata to specific namespaces in your cluster or self-manage your certificates. Default configuration We offer instructions for deploying our integration using Helm. Just be sure that, when you are configuring the chart, the webhook that injects the metadata is enabled.
Notice that we are specifying --dry-run and --debug, so nothing will be installed in this step: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Custom configuration You can limit the injection of metadata only to specific namespaces by using labels. To enable this feature, edit the nri-bundle Helm values.yaml file: nri-metadata-injection: injectOnlyLabeledNamespaces: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.injectOnlyLabeledNamespaces=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy With this option, injection is only applied to those namespaces that have the newrelic-metadata-injection label set to enabled: kubectl label namespace YOUR_NAMESPACE newrelic-metadata-injection=enabled Copy Manage custom certificates To use custom certificates, you need to disable the automatic installation of certificates when you are installing using Helm. To disable the installation of certificates, just modify the nri-bundle Helm values.yaml like this: nri-metadata-injection: customTLSCertificate: true Copy Or add a --set when installing or upgrading your Helm release: helm upgrade --install newrelic newrelic/nri-bundle \\ --dry-run \\ --debug \\ --namespace newrelic --create-namespace \\ --set global.licenseKey=YOUR_NEW_RELIC_LICENSE_KEY \\ --set global.cluster=K8S_CLUSTER_NAME \\ --set ksm.enabled=true \\ --set newrelic-infrastructure.privileged=true \\ --set infrastructure.enabled=true \\ --set prometheus.enabled=true \\ --set webhook.enabled=true \\ --set nri-metadata-injection.customTLSCertificate=true \\ --set kubeEvents.enabled=true \\ --set logging.enabled=true Copy Now you can proceed with the custom certificate management option. You need your certificate, server key, and Certification Authority (CA) bundle encoded in PEM format. If you have them in the standard certificate format (X.509), install openssl, and run the following: openssl x509 -in CERTIFICATE_FILENAME -outform PEM -out CERTIFICATE_FILENAME.pem openssl rsa -in SERVER_KEY_FILENAME -outform PEM -out SERVER_KEY_FILENAME.pem openssl x509 -in CA_BUNDLE_FILENAME -outform PEM -out BUNDLE_FILENAME.pem Copy If your certificate/key pair is in another format, see the Digicert knowledgebase for more help.
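Before creating the TLS secret described next, you need a PEM certificate and key in hand. Purely as an illustrative sketch for local experimentation (not the documented flow, which expects certificates signed by a CA your cluster trusts), a self-signed pair could be generated like this; the CN is an assumption based on the webhook service name used elsewhere on this page:

```bash
# Illustrative only: self-signed cert/key for experimenting with the webhook.
# The service DNS name in -subj is an assumption, not from the official docs.
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
  -keyout server.key -out server.crt \
  -subj "/CN=newrelic-metadata-injection-svc.default.svc"
```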
Create the TLS secret with the signed certificate/key pair, and patch the mutating webhook configuration with the CA using the following commands: kubectl create secret tls newrelic-metadata-injection-admission \\ --key=PEM_ENCODED_SERVER_KEY \\ --cert=PEM_ENCODED_CERTIFICATE \\ --dry-run -o yaml | kubectl -n newrelic apply -f - caBundle=$(cat PEM_ENCODED_CA_BUNDLE | base64 | tr -d $'\\n') kubectl patch mutatingwebhookconfiguration newrelic-metadata-injection-cfg --type='json' -p \"[{'op': 'replace', 'path': '/webhooks/0/clientConfig/caBundle', 'value':'${caBundle}'}]\" Copy Important Certificates signed by Kubernetes have an expiration of one year. For more information, see the Kubernetes source code in GitHub. Validate the injection of metadata In order to validate that the webhook (responsible for injecting the metadata) was installed correctly, deploy a new pod and check for the New Relic environment variables. Create a dummy pod containing Busybox by running: kubectl create -f https://git.io/vPieo Copy Check if New Relic environment variables were injected: kubectl exec busybox0 -- env | grep NEW_RELIC_METADATA_KUBERNETES NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME=fsi NEW_RELIC_METADATA_KUBERNETES_NODE_NAME=nodea NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME=default NEW_RELIC_METADATA_KUBERNETES_POD_NAME=busybox0 NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME=busybox Copy Disable the injection of metadata To disable/uninstall the injection of metadata, use the following commands: Delete the Kubernetes objects using the yaml file: kubectl delete -f k8s-metadata-injection-latest.yaml Copy Delete the TLS secret containing the certificate/key pair: kubectl delete secret/newrelic-metadata-injection-secret Copy Troubleshooting Follow these troubleshooting tips as needed. No Kubernetes metadata in APM or distributed tracing transactions Problem The creation of the secret by the k8s-webhook-cert-manager job used to fail due to the kubectl version used by the image when running in Kubernetes version 1.19.x. The new version 1.3.2 fixes this issue, so it is enough to run the job again using an updated version of the image. Solution Update the image k8s-webhook-cert-manager (to a version >= 1.3.2) and re-run the job. The secret will be correctly created and the k8s-metadata-injection pod will be able to start. Note that the new version of the manifest and of the nri-bundle are already updated with the correct version of the image. Problem In OpenShift version 4.x, the CA that is used in order to patch the mutatingwebhookconfiguration resource is not the one used when signing the certificates. This is a known issue currently tracked here. In the logs of the Pod nri-metadata-injection, you'll see the following error message: TLS handshake error from 10.131.0.29:37428: remote error: tls: unknown certificate authority TLS handshake error from 10.129.0.1:49314: remote error: tls: bad certificate Copy Workaround Manually update the certificate stored in the mutatingwebhookconfiguration object. The correct CA locations might change according to the cluster configuration. However, you can usually find the CA in the secret csr-signer in the namespace openshift-kube-controller-manager. Problem There is no Kubernetes metadata included in the transactions' attributes of your APM agent or in distributed tracing. Solution Verify that the environment variables are being correctly injected by following the instructions described in the Validate your installation step.
If they are not present, get the name of the metadata injection pod by running: kubectl get pods | grep newrelic-metadata-injection-deployment kubectl logs -f pod/podname Copy In another terminal, create a new pod (for example, see Validate your installation), and inspect the logs of the metadata injection deployment for errors. For every created pod there should be a set of 4 new entries in the logs like: {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.107Z\",\"caller\":\"server/main.go:139\",\"msg\":\"POST https://newrelic-metadata-injection-svc.default.svc:443/mutate?timeout=30s HTTP/2.0\\\" from 10.11.49.2:32836\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.110Z\",\"caller\":\"server/webhook.go:168\",\"msg\":\"received admission review\",\"kind\":\"/v1, Kind=Pod\",\"namespace\":\"default\",\"name\":\"\",\"pod\":\"busybox1\",\"UID\":\"6577519b-7a61-11ea-965e-0e46d1c9335c\",\"operation\":\"CREATE\",\"userinfo\":{\"username\":\"admin\",\"uid\":\"admin\",\"groups\":[\"system:masters\",\"system:authenticated\"]}} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:182\",\"msg\":\"admission response created\",\"response\":\"[{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env\\\",\\\"value\\\":[{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CLUSTER_NAME\\\",\\\"value\\\":\\\"adn_kops\\\"}]},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NODE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"spec.nodeName\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_NAMESPACE_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.namespace\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_POD_NAME\\\",\\\"valueFrom\\\":{\\\"fieldRef\\\":{\\\"fieldPath\\\":\\\"metadata.name\\\"}}}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_NAME\\\",\\\"value\\\":\\\"busybox\\\"}},{\\\"op\\\":\\\"add\\\",\\\"path\\\":\\\"/spec/containers/0/env/-\\\",\\\"value\\\":{\\\"name\\\":\\\"NEW_RELIC_METADATA_KUBERNETES_CONTAINER_IMAGE_NAME\\\",\\\"value\\\":\\\"busybox\\\"}}]\"} {\"level\":\"info\",\"ts\":\"2020-04-09T12:55:32.111Z\",\"caller\":\"server/webhook.go:257\",\"msg\":\"writing response\"} Copy If there are no new entries on the logs, it means that the apiserver is not able to communicate with the webhook service; this could be due to networking rules or security groups rejecting the communication. To check whether the apiserver is unable to communicate with the webhook, inspect the apiserver logs for errors like: failed calling webhook \"metadata-injection.newrelic.com\": ERROR_REASON Copy To get the apiserver logs: Start a proxy to the Kubernetes API server by executing the following command in a terminal window and keep it running. kubectl proxy --port=8001 Copy Create a new pod in your cluster; this will make the apiserver try to communicate with the webhook. The following command will create a busybox pod. kubectl create -f https://git.io/vPieo Copy Retrieve the apiserver logs. curl localhost:8001/logs/kube-apiserver.log > apiserver.log Copy Delete the busybox container. kubectl delete -f https://git.io/vPieo Copy Inspect the logs for errors.
grep -E 'failed calling webhook' apiserver.log Copy Remember that one of the requirements for the metadata injection is that the apiserver must be allowed egress to the pods running on the cluster. If you encounter errors regarding connection timeouts or failed connections, make sure to check the security groups and firewall rules of the cluster. If there are no log entries in either the apiserver logs or the metadata injection deployment, it means that the webhook was not properly registered. Ensure the metadata injection setup job ran successfully by inspecting the output of: kubectl get job newrelic-metadata-setup Copy If the job is not completed, investigate the logs of the setup job: kubectl logs job/newrelic-metadata-setup Copy Ensure the CertificateSigningRequest is approved and issued by running: kubectl get csr newrelic-metadata-injection-svc.default Copy Ensure the TLS secret is present by running: kubectl get secret newrelic-metadata-injection-secret Copy Ensure the CA bundle is present in the mutating webhook configuration: kubectl get mutatingwebhookconfiguration newrelic-metadata-injection-cfg -o json Copy Ensure the TargetPort of the Service resource matches the Port of the Deployment's container: kubectl describe service/newrelic-metadata-injection-svc kubectl describe deployment/newrelic-metadata-injection-deployment Copy", "body": "BETA FEATURE This feature is still in development, but we encourage you to try it out! You can use metrics from your New Relic account to autoscale applications and services in your Kubernetes cluster by deploying the New Relic Metrics Adapter. This adapter fetches the metric values from New Relic and makes them available for the Horizontal Pod Autoscalers. The newrelic-k8s-metrics-adapter implements the external.metrics.k8s.io API to support the use of external metrics based on New Relic NRQL query results. Once deployed, the value for each configured metric is fetched using the NerdGraph API based on the configured NRQL query. The metrics adapter exposes the metrics over a secured endpoint with TLS. New Relic metrics adapter in a cluster. Requirements Kubernetes 1.16 or higher. The New Relic Kubernetes integration. New Relic's user API key. No other External Metrics Adapter installed in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. Upgrade the installation to include the New Relic Metrics Adapter with the following command: helm upgrade --install newrelic newrelic/nri-bundle \\ --namespace newrelic --create-namespace --reuse-values \\ --set metrics-adapter.enabled=true \\ --set newrelic-k8s-metrics-adapter.personalAPIKey=YOUR_NEW_RELIC_PERSONAL_API_KEY \\ --set newrelic-k8s-metrics-adapter.config.accountID=YOUR_NEW_RELIC_ACCOUNT_ID \\ --set newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query=NRQL query Copy Please note and adjust the following flags: metrics-adapter.enabled: Must be set to true so the metrics adapter chart is installed. newrelic-k8s-metrics-adapter.personalAPIKey: Must be set to a valid New Relic Personal API key. newrelic-k8s-metrics-adapter.config.accountID: Must be set to a valid New Relic account ID that metrics will be fetched from.
newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query: Adds a new external metric where: external_metric_name: The metric name. query: The base NRQL query that is used to get the value for the metric. Tip Alternatively, you can use a values.yaml file that can be passed to the helm command with the --values flag. Values files can contain all parameters needed to configure the metrics explained in the configuration section. Configuration You can configure multiple metrics in the metrics adapter and change some parameters to modify the behaviour of the metrics cache and filtering. To see the full list and descriptions of all parameters that can be modified, refer to the chart README.md and values.yaml files. How it works The following example is a Helm values file that enables the metrics adapter on the nri-bundle chart installation, and configures the nginx_average_requests metric: metrics-adapter: enabled: true newrelic-k8s-metrics-adapter: personalAPIKey: config: accountID: externalMetrics: nginx_average_requests: query: \"FROM Metric SELECT average(nginx.server.net.requestsPerSecond) SINCE 2 MINUTES AGO\" Copy Caution The default time span for metrics is 1h. Therefore, you should define queries with the SINCE clause to adjust the time span according to your environment and needs. There is an HPA consuming the external metric as follows: kind: HorizontalPodAutoscaler apiVersion: autoscaling/v2beta2 metadata: name: nginx-scaler spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: nginx minReplicas: 1 maxReplicas: 10 metrics: - type: External external: metric: name: nginx_average_requests selector: matchLabels: k8s.namespaceName: nginx target: type: Value value: 10000 Copy Based on the HPA definition, the controller manager fetches the metrics from the external metrics API which are served by the New Relic metrics adapter. The New Relic metrics adapter receives the query including the nginx_average_requests metric name and all the selectors, and searches for a matching metric name in the internal memory based on the configured metrics. Then, it adds the selectors to the query to form a final query that is executed using NerdGraph to fetch the value from New Relic. The above example will generate a query like the following: FROM Metric SELECT average(nginx.server.net.requestsPerSecond) WHERE clusterName= AND `k8s.namespaceName`='nginx' SINCE 2 MINUTES AGO Copy Notice that a clusterName filter has been automatically added to the query to exclude metrics from other clusters in the same account. You can remove it by using the removeClusterFilter configuration parameter. Also, the value is cached for a period of time defined by the cacheTTLSeconds configuration parameter, whose default is 30 seconds. Troubleshooting Get verbose logs The most common errors are displayed in the standard (non-verbose) logs. If you're doing a more in-depth investigation on your own or with New Relic Support, you can enable verbose mode. To get verbose logging details for an integration using Helm: Enable verbose logging: bash Copy $ helm upgrade -n newrelic --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=true newrelic/nri-bundle Leave verbose mode on for a few minutes, or until enough activity has occurred.
When you have the information you need, disable verbose logging: bash Copy $ helm upgrade --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=false newrelic/nri-bundle Caution Verbose mode significantly increases the amount of information sent to log files. Enable this mode temporarily, only for troubleshooting purposes, and reset the log level when finished. Get raw metrics Sometimes it's useful to get the list of available metrics and also to get the current value of a specific metric. To get the list of metrics available, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/\" To get the value for a specific metric with a selector, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/namespaces/*/__METRIC_NAME__?labelSelector=_SELECTOR_KEY_=_SELECTOR_VALUE_\" Tip You must replace __METRIC_NAME__, _SELECTOR_KEY_, and _SELECTOR_VALUE_ with your values. Metrics not working There are some common errors that can cause a metric to fail to retrieve its value. These errors are shown in the status of the metrics when you describe the HPA, or are printed when you get the raw metrics directly. executing query: NRQL Syntax Error: Error at line...: The query that is being run has syntax errors. The same error message gives you the executed query and position of the error. You can try this query inside the New Relic query builder and correct the configuration from the adapter. extracting return value: expected first value to be of type \"float64\", got %!q(): The query doesn't return any value. The same error message gives you the executed query so you can try this query inside the New Relic query builder and correct the configuration from the adapter or the match selectors in the HPA.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", "_score": 223.34854, "_version": null, "_explanation": null, "sort": null, "highlight": { "tags": "Kubernetes integration", "body": " in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration.
Upgrade" }, "id": "61fd193d196a672daae826d6" }, { "sections": [ "Configure control plane monitoring", "Tip", "Features", "Compatibility and requirements", "Control plane component", "Autodiscovery and default configuration", "hostNetwork and privileged", "Custom autodiscovery", "mTLS", "Static endpoints", "Limitations", "Important", "Control plane monitoring for managed and cloud environments", "Monitoring control plane with integration version 2", "Control plane monitoring on Integration version 2", "Discovery of master nodes and control plane components", "Configuration", "etcd", "API server", "OpenShift configuration", "OpenShift configuration on Integration version 2", "Set up mTLS for etcd in OpenShift", "See your data" ], "title": "Configure control plane monitoring", "type": "docs", "tags": [ "Installation", "Kubernetes integration", "Integrations" ], "external_id": "33b7b8ae3dab9a2ca553dcf8ea0c97499478a85a", "image": "https://docs.newrelic.com/static/209f301630c770f87ea8cbb1cace8e6e/8c557/new-relic-one-k8s-cluster-explorer-control-plane-parameters.png", "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/configure-control-plane-monitoring/", "published_at": "2022-02-15T19:15:25Z", "updated_at": "2022-02-04T12:15:37Z", "document_type": "page", "popularity": 1, "body": "New Relic provides control plane support for your Kubernetes integration, allowing you to monitor and collect metrics from your cluster's control plane components. That data can then be found in New Relic and used to create queries and charts. Tip Unless otherwise specified, this page refers to the Kubernetes integration v3. Details on how to configure control plane monitoring for v2 can be found in a specific section below. Features We monitor and collect metrics from the following control plane components: etcd: leader information, resident memory size, number of OS threads, consensus proposals data, etc. For a list of supported metrics, see etcd data. API server: rate of apiserver requests, breakdown of apiserver requests by HTTP method and response code, etc. For the complete list of supported metrics, see API server data. Scheduler: requested CPU/memory vs available on the node, tolerations to taints, any set affinity or anti-affinity, etc. For the complete list of supported metrics, see Scheduler data. Controller manager: resident memory size, number of OS threads created, goroutines currently existing, etc. For the complete list of supported metrics, see Controller manager data. Compatibility and requirements Control plane monitoring support is limited for managed clusters. This is because most cloud providers do not expose the metrics endpoints for the control plane components, so New Relic cannot access them. When deploying the solution in unprivileged mode, control plane setup will require extra steps and some caveats might apply. OpenShift 4.x uses control plane component metric endpoints that are different from the default. Control plane component The task of monitoring the Kubernetes control plane is the responsibility of the nrk8s-controlplane component, which by default is deployed as a DaemonSet.
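As a quick sketch for confirming where that DaemonSet landed (the pod name prefix comes from the body above; the newrelic namespace is an assumption based on the install commands on this page):

```bash
# Sketch: list the control plane scraper pods and the nodes they run on,
# to confirm the master-node selectors described below actually matched.
kubectl -n newrelic get pods -o wide | grep nrk8s-controlplane
```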
This component is automatically deployed to master nodes, through the use of a default list of nodeSelectorTerms which includes labels commonly used to identify master nodes, such as node-role.kubernetes.io/control-plane or node-role.kubernetes.io/master. Regardless, this selector is exposed in the values.yml file and therefore can be reconfigured to fit other environments. Clusters that do not have any node matching these selectors will not get any pod scheduled, thus not wasting any resources; this is functionally equivalent to disabling control plane monitoring altogether by setting controlPlane.enabled to false in the Helm Chart. Each component of the control plane has a dedicated section, which allows you to individually: Enable or disable monitoring of that component Define specific selectors and namespaces for discovering that component Define the endpoints and paths that will be used to fetch metrics for that component Define the authentication mechanisms that need to be used to get metrics for that component Manually specify endpoints that skip autodiscovery completely Autodiscovery and default configuration By default, our Helm Chart ships a configuration that should work out of the box for some control plane components for on-premise distributions that run the control plane inside the cluster, such as Kubeadm or minikube. hostNetwork and privileged Most users and Kubernetes distributions configure the control plane metrics endpoints to listen only on the loopback interface, i.e. localhost. For this reason, the control plane component is deployed with hostNetwork: true by default when privileged is set to true (the default). When the integration is deployed using privileged: false, the hostNetwork setting for the control plane component will also be set to false. We chose to do it this way because otherwise, we would not be honoring the intent users have when they set privileged: false. Unfortunately, deploying without hostNetwork will cause control plane scraping to fail in most environments, which will result in missing metrics or the nrk8s-controlplane pods getting stuck in a CrashLoopBackoff state. This is a limitation of Kubernetes itself, as the control plane cannot be monitored without hostNetwork unless components are manually configured to do so. It is common to deploy the integration in unprivileged mode (privileged: false) while still considering it acceptable to run the control plane pods with hostNetwork. This can be achieved by setting controlPlane.unprivilegedHostNetwork to true: this will tell the chart to deploy the control plane component with hostNetwork: true, despite the value of the higher-level privileged flag. If running pods with hostNetwork is not acceptable whatsoever, due to cluster or other policies, control plane monitoring is not possible and should be disabled by setting controlPlane.enabled to false. Custom autodiscovery Selectors used for autodiscovery are completely exposed as configuration entries in the values.yaml file, which means they can be tweaked or replaced to fit almost any environment where the control plane is run as a part of the cluster. An autodiscovery section looks like the following: autodiscover: - selector: \"tier=control-plane,component=etcd\" namespace: kube-system # Set to true to consider only pods sharing the node with the scraper pod. # This should be set to `true` if Kind is Daemonset, `false` otherwise. matchNode: true # Try to reach etcd using the following endpoints.
endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer - url: http://localhost:2381 - selector: \"k8s-app=etcd-manager-main\" namespace: kube-system matchNode: true endpoints: - url: https://localhost:4001 insecureSkipVerify: true auth: type: bearer Copy The autodiscover section contains a list of autodiscovery entries. Each entry has: selector: A string-encoded label selector that will be used to look for pods. matchNode: If set to true, it will additionally limit discovery to pods running in the same node as the particular instance of the DaemonSet performing discovery. endpoints: A list of endpoints to try if a pod is found for the specified selector. Additionally, each endpoint has: url: URL to target, including scheme. Can be http or https. insecureSkipVerify: If set to true, the certificate will not be checked for https URLs. auth.type: Which mechanism to use to authenticate the request. Currently, the following methods are supported: None: If auth is not specified, the request will not contain any authentication whatsoever. bearer: The same bearer token used to authenticate against the Kubernetes API will be sent to this request. mtls: mTLS will be used to perform the request. mTLS For the mtls type, the following needs to be specified: endpoints: - url: https://localhost:4001 auth: type: mtls mtls: secretName: secret-name secretNamespace: secret-namespace Copy Where secret-name is the name of a Kubernetes TLS Secret, which lives in secret-namespace, and contains the certificate, key, and CA required to connect to that particular endpoint. The integration fetches this secret at runtime rather than mounting it, which means it requires an RBAC role granting it access to it. Our Helm Chart automatically detects auth.mtls entries at render time and will create entries for these particular secrets and namespaces for you, unless rbac.create is set to false. Our integration accepts a secret with the following keys: cert: The PEM-encoded certificate that will be presented to etcd key: The PEM-encoded private key corresponding to the certificate above These certificates should be signed by the same CA etcd is using to operate. How to generate these certificates is out of the scope of this documentation, as it will vary greatly between different Kubernetes distributions. Please refer to your distribution's documentation to see how to fetch the required etcd peer certificates. In Kubeadm, for example, they can be found in /etc/kubernetes/pki/etcd/peer.{crt,key} in the master node. Once you have located or generated the etcd peer certificates, you should rename the files to match the keys we expect to be present in the secret, and create the secret in the cluster: bash Copy $ mv peer.crt cert $ mv peer.key key $ mv ca.crt cacert $ $ kubectl -n newrelic create secret generic newrelic-etcd-tls-secret --from-file=./cert --from-file=./key --from-file=./cacert Finally, you can input the secret name (newrelic-etcd-tls-secret) and namespace (newrelic) in the config snippet shown at the beginning of this section. Remember that the Helm Chart will automatically parse this config and create an RBAC role to grant access to this specific secret and namespace for the nrk8s-controlplane component, so there's no manual action needed in that regard.
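As a sanity-check sketch after the secret creation above: describe the secret and confirm the cert, key, and cacert entries are present with non-zero sizes before wiring it into the mtls config.

```bash
# Sketch: verify the etcd mTLS secret created above carries the expected keys.
kubectl -n newrelic describe secret newrelic-etcd-tls-secret
```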
Static endpoints While autodiscovery should cover cases where the control plane lives inside the Kubernetes cluster, some distributions or sophisticated Kubernetes environments run the control plane elsewhere, for a variety of reasons including availability or resource isolation. For these cases, the integration can be configured to scrape an arbitrary, fixed URL regardless of whether a pod with a control plane label is found in the node. This is done by specifying a staticEndpoint entry. For example, one for an external etcd instance would look like this: controlPlane: etcd: staticEndpoint: url: https://url:port insecureSkipVerify: true auth: {} Copy staticEndpoint is the same type of entry as endpoints in the autodiscover entry, whose fields are described above. The same authentication mechanisms and schemas are supported here. Please keep in mind that if staticEndpoint is set, the autodiscover section will be ignored in its entirety. Limitations Important If you are using staticEndpoint pointing to an out-of-node (i.e. not localhost) endpoint, you must change controlPlane.kind from DaemonSet to Deployment. When using staticEndpoint, all nrk8s-controlplane pods will attempt to reach and scrape said endpoint. This means that, if nrk8s-controlplane is a DaemonSet (the default), all instances of the DaemonSet will scrape this endpoint. While this is fine if you are pointing them to localhost, if the endpoint is not local to the node you could potentially produce duplicate metrics and increased billable usage. If you are using staticEndpoint and pointing it to a non-local URL, make sure to change controlPlane.kind to Deployment. For the same reason above, it is currently not possible to use autodiscovery for some control plane components, and a static endpoint for others. This is a known limitation we are working to address in future versions of the integration. Lastly, staticEndpoint allows you to define only a single endpoint per component. This means that if you have multiple control plane shards in different hosts, it is currently not possible to point to them separately. This is also a known limitation we are working to address in future versions. For the time being, a workaround could be to aggregate metrics for different shards elsewhere, and point the staticEndpoint URL to the aggregated output. Control plane monitoring for managed and cloud environments Some cloud environments, like EKS or GKE, allow retrieving metrics from the Kubernetes API Server. This can be easily configured as a static endpoint: controlPlane: affinity: nodeAffinity: false # https://github.com/helm/helm/issues/9136 kind: Deployment config: etcd: enabled: false scheduler: enabled: false controllerManager: enabled: false apiServer: staticEndpoint: url: \"https://kubernetes.default:443\" insecureSkipVerify: true auth: type: bearer Copy Please note that this only applies to the API Server and that etcd, the scheduler, and the controller manager remain inaccessible in cloud environments. Monitoring control plane with integration version 2 This section covers how to configure control plane monitoring on versions 2 and earlier of the integration. Please note that these versions had less flexible autodiscovery options, and did not support external endpoints. We strongly recommend you update to version 3 at your earliest convenience. See what's changed in the Kubernetes integration.
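One way to apply the managed-cloud staticEndpoint snippet shown earlier is through a values file passed to Helm; this is a sketch, and the file name values-controlplane.yaml is an assumption:

```bash
# Sketch: save the managed-cloud YAML snippet above as values-controlplane.yaml,
# then upgrade the release so only the API server is scraped via staticEndpoint.
helm upgrade --install newrelic newrelic/nri-bundle \
  --namespace newrelic --create-namespace --reuse-values \
  -f values-controlplane.yaml
```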
Control plane monitoring on Integration version 2 Discovery of master nodes and control plane components The Kubernetes integration relies on the kubeadm labeling conventions to discover the master nodes and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master=\"\" or kubernetes.io/role=\"master\". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations and values: Component Label Endpoint API server Kubeadm / Kops / ClusterAPI k8s-app=kube-apiserver tier=control-plane component=kube-apiserver OpenShift app=openshift-kube-apiserver apiserver=true localhost:443/metrics by default (can be configured) if the request fails falls back to localhost:8080/metrics etcd Kubeadm / Kops / ClusterAPI k8s-app=etcd-manager-main tier=control-plane component=etcd OpenShift k8s-app=etcd localhost:4001/metrics Scheduler Kubeadm / Kops / ClusterAPI k8s-app=kube-scheduler tier=control-plane component=kube-scheduler OpenShift app=openshift-kube-scheduler scheduler=true localhost:10251/metrics Controller manager Kubeadm / Kops / ClusterAPI k8s-app=kube-controller-manager tier=control-plane component=kube-controller-manager​ OpenShift app=kube-controller-manager kube-controller-manager=true localhost:10252/metrics When the integration detects that it is running inside a master node, it tries to find which components are running on the node by looking for pods that match the labels listed in the table above. For every running component, the integration makes a request to its metrics endpoint. Configuration Control plane monitoring is automatic for agents running inside master nodes. The only component that requires an extra step to run is etcd, because it uses mutual TLS authentication (mTLS) for client requests. The API Server can also be configured to be queried using the Secure Port. Important Control plane monitoring for OpenShift 4.x requires additional configuration. For more information, see the OpenShift 4.x Configuration section. etcd In order to set mTLS for querying etcd, there are two configuration options that need to be set: Option Value ETCD_TLS_SECRET_NAME Name of a Kubernetes secret that contains the mTLS configuration. The secret should contain the following keys: cert: the certificate that identifies the client making the request. It should be signed by an etcd trusted CA. key: the private key used to generate the client certificate. cacert: the root CA used to identify the etcd server certificate. If the ETCD_TLS_SECRET_NAME option is not set, etcd metrics won't be fetched. ETCD_TLS_SECRET_NAMESPACE The namespace where the secret specified in the ETCD_TLS_SECRET_NAME was created. If not set, the default namespace is used. API server By default, the API server metrics are queried using the localhost:8080 unsecured endpoint. If this port is disabled, you can also query these metrics over the secure port. To enable this, set the following configuration option in the Kubernetes integration manifest file: Option Value API_SERVER_ENDPOINT_URL The (secure) URL to query the metrics. The API server uses localhost:443 by default Ensure that the ClusterRole has been updated to the newest version found in the manifest Added in version 1.15.0 Important Note that the port can be different according to the secure port used by the API server. 
OpenShift configuration Version 3 of the Kubernetes Integration includes default settings that will autodiscover control plane components in OpenShift clusters, so it should work out of the box for all components except etcd. Etcd is not supported out of the box because its metrics endpoint is configured to require mTLS authentication in OpenShift environments. Our integration supports mTLS authentication to fetch etcd metrics in this configuration; however, you will need to create the required mTLS certificate manually. This is necessary to avoid granting wide permissions to our integration without the user's explicit approval. To create an mTLS secret, please follow the steps in the section below, and then configure the integration to use the newly created secret as described in the mtls section. OpenShift configuration on Integration version 2 Important When installing on OpenShift through Helm, specify the configuration to automatically include these endpoints. Setting openshift.enabled=true and openshift.version=\"4.x\" will include the secure endpoints and enable the /var/run/crio.sock runtime. Control plane components on OpenShift 4.x use endpoint URLs that require SSL and service account based authentication. Therefore, the default endpoint URLs cannot be used. To configure control plane monitoring on OpenShift, uncomment the following environment variables in the customized manifest. URL values are pre-configured to the default base URLs for the control plane monitoring metrics endpoints in OpenShift 4.x. - name: \"SCHEDULER_ENDPOINT_URL\" value: \"https://localhost:10259\" - name: \"ETCD_ENDPOINT_URL\" value: \"https://localhost:9979\" - name: \"CONTROLLER_MANAGER_ENDPOINT_URL\" value: \"https://localhost:10257\" - name: \"API_SERVER_ENDPOINT_URL\" value: \"https://localhost:6443\" Copy Important Even though the custom ETCD_ENDPOINT_URL is defined, etcd requires HTTPS and mTLS authentication to be configured. For more on configuring mTLS for etcd in OpenShift, see Set up mTLS for etcd in OpenShift. Set up mTLS for etcd in OpenShift Follow these instructions to set up mutual TLS authentication for etcd in OpenShift 4.x: Export the etcd client certificates from the cluster to an opaque secret. In a default managed OpenShift cluster, the secret is named kube-etcd-client-certs and it is stored in the openshift-monitoring namespace. bash Copy $ kubectl get secret kube-etcd-client-certs -n openshift-monitoring -o yaml > etcd-secret.yaml Open the secret file and change the keys: Rename the certificate authority to cacert. Rename the client certificate to cert. Rename the client key to key. Optionally, change the secret name and namespace to something meaningful. Remove these unnecessary keys in the metadata section: creationTimestamp resourceVersion selfLink uid Install the manifest with its new name and namespace: bash Copy $ kubectl apply -n newrelic -f etcd-secret.yaml Configure the integration to use the newly created secret as described in the mtls section. See your data If the integration has been set up correctly, the Kubernetes cluster explorer contains all the control plane components and their status in a dedicated section, as shown below. one.newrelic.com > Kubernetes Cluster Explorer: Use the Kubernetes cluster explorer to monitor and collect metrics from your cluster's Control Plane components.
You can also check for control plane data with this NRQL query: SELECT latest(timestamp) FROM K8sApiServerSample, K8sEtcdSample, K8sSchedulerSample, K8sControllerManagerSample FACET entityName where clusterName = '_MY_CLUSTER_NAME_' Copy Tip If you still can't see Control Plane data, try the solution described in Kubernetes integration troubleshooting: Not seeing data.", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 218.79257, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "sections": "Static endpoints", + "tags": "Kubernetes integration", + "body": " and the control plane components. This means that master nodes should be labeled with node-role.kubernetes.io/master="" or kubernetes.io/role="master". The control plane components should have either the k8s-app or the tier and component labels. Refer to the following table for accepted label combinations" + }, + "id": "61fd18e9e7b9d2b5cc5e7358" }, { "sections": [ @@ -90797,7 +90760,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 228.10867, + "_score": 216.73175, "_version": null, "_explanation": null, "sort": null, @@ -90808,49 +90771,6 @@ "body": " the infrastructure-agent and on-host integrations. images.integration.* to configure the image in charge of scraping k8s data. Upgrade from v2 In order to upgrade from the Kubernetes integration version 2 (included in nri-bundle chart versions 3.x), we strongly encourage you to create a values" }, "id": "61fd3c9d28ccbc72eec0dcda" - }, - { - "sections": [ - "New Relic Metrics Adapter", - "BETA FEATURE", - "Requirements", - "Installation", - "Tip", - "Configuration", - "How it works", - "Caution", - "Troubleshooting", - "Get verbose logs", - "Get raw metrics", - "Metrics not working" - ], - "title": "New Relic Metrics Adapter", - "type": "docs", - "tags": [ - "Link apps and services", - "Kubernetes integration", - "Integrations" - ], - "external_id": "51fdc0c8df2fdc91fcc51556e323c62e7c12d48a", - "image": "", - "url": "https://docs.newrelic.com/docs/kubernetes-pixie/kubernetes-integration/advanced-configuration/newrelic-metrics-adapter/", - "published_at": "2022-02-15T19:18:45Z", - "updated_at": "2022-02-04T12:17:01Z", - "document_type": "page", - "popularity": 1, - "body": "BETA FEATURE This feature is still in development, but we encourage you to try it out! You can use metrics from your New Relic account to autoscale applications and services in your Kubernetes cluster by deploying the New Relic Metrics Adapter. This adapter fetches the metric values from New Relic and makes them available for the Horizontal Pod Autoscalers. The newrelic-k8s-metrics-adapter implements the external.metrics.k8s.io API to support the use of external metrics based New Relic NRQL queries results. Once deployed, the value for each configured metric is fetched using the NerdGraph API based on the configured NRQL query. The metrics adapter exposes the metrics over a secured endpoint with TLS. New Relic metrics adapter in a cluster. Requirements Kubernetes 1.16 or higher. The New Relic Kubernetes integration. New Relic's user API key. No other External Metrics Adapter installed in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. 
Upgrade the installation to include the New Relic Metrics Adapter with the following command: helm upgrade --install newrelic newrelic/nri-bundle \\ --namespace newrelic --create-namespace --reuse-values \\ --set metrics-adapter.enabled=true \\ --set newrelic-k8s-metrics-adapter.personalAPIKey=YOUR_NEW_RELIC_PERSONAL_API_KEY \\ --set newrelic-k8s-metrics-adapter.config.accountID=YOUR_NEW_RELIC_ACCOUNT_ID \\ --set newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query=NRQL query Copy Please note and adjust the following flags: metrics-adapter.enabled: Must be set to true so the metrics adapter chart is installed. newrelic-k8s-metrics-adapter.personalAPIKey: Must be set to a valid New Relic personal API key. newrelic-k8s-metrics-adapter.config.accountID: Must be set to a valid New Relic account ID that metrics are going to be fetched from. newrelic-k8s-metrics-adapter.config.externalMetrics.external_metric_name.query: Adds a new external metric where: external_metric_name: The metric name. query: The base NRQL query that is used to get the value for the metric. Tip Alternatively, you can use a values.yaml file that can be passed to the helm command with the --values flag. Values files can contain all parameters needed to configure the metrics explained in the configuration section. Configuration You can configure multiple metrics in the metrics adapter and change some parameters to modify the behavior of the metrics cache and filtering. To see the full list and descriptions of all parameters that can be modified, refer to the chart README.md and values.yaml files. How it works The following example is a Helm values file that enables the metrics adapter on the nri-bundle chart installation, and configures the nginx_average_requests metric: metrics-adapter: enabled: true newrelic-k8s-metrics-adapter: personalAPIKey: YOUR_NEW_RELIC_PERSONAL_API_KEY config: accountID: YOUR_NEW_RELIC_ACCOUNT_ID externalMetrics: nginx_average_requests: query: \"FROM Metric SELECT average(nginx.server.net.requestsPerSecond) SINCE 2 MINUTES AGO\" Copy Caution The default time span for metrics is 1h. Therefore, you should define queries with the SINCE clause to adjust the time span according to your environment and needs. There is an HPA consuming the external metric as follows: kind: HorizontalPodAutoscaler apiVersion: autoscaling/v2beta2 metadata: name: nginx-scaler spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: nginx minReplicas: 1 maxReplicas: 10 metrics: - type: External external: metric: name: nginx_average_requests selector: matchLabels: k8s.namespaceName: nginx target: type: Value value: 10000 Copy Based on the HPA definition, the controller manager fetches the metrics from the external metrics API, which are served by the New Relic metrics adapter. The New Relic metrics adapter receives the query, including the nginx_average_requests metric name and all the selectors, and searches for a matching metric name in the internal memory based on the configured metrics. Then, it adds the selectors to the query to form a final query that is executed using NerdGraph to fetch the value from New Relic. The above example will generate a query like the following: FROM Metric SELECT average(nginx.server.net.requestsPerSecond) WHERE clusterName='CLUSTER_NAME' AND `k8s.namespaceName`='nginx' SINCE 2 MINUTES AGO Copy Notice that a clusterName filter has been automatically added to the query to exclude metrics from other clusters in the same account. You can remove it by using the removeClusterFilter configuration parameter.
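As a sketch of the removeClusterFilter parameter just mentioned: assuming it sits alongside query in each external metric definition (verify the exact placement against the chart's README.md and values.yaml), disabling the automatic cluster filter could look like this:

```yaml
newrelic-k8s-metrics-adapter:
  config:
    externalMetrics:
      nginx_average_requests:
        query: "FROM Metric SELECT average(nginx.server.net.requestsPerSecond) SINCE 2 MINUTES AGO"
        # Assumed placement: skips the automatic clusterName filter for this metric.
        removeClusterFilter: true
```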
Also, the value is cached for a period of time defined by the cacheTTLSeconds configuration parameter, whose default is 30 seconds. Troubleshooting Get verbose logs Most common errors are displayed in the standard (non-verbose) logs. If you're doing a more in-depth investigation on your own or with New Relic Support, you can enable verbose mode. To get verbose logging details for an integration using Helm: Enable verbose logging: bash Copy $ helm upgrade -n newrelic --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=true newrelic/nri-bundle Leave verbose mode on for a few minutes, or until enough activity has occurred. When you have the information you need, disable verbose logging: bash Copy $ helm upgrade --reuse-values newrelic-bundle --set newrelic-k8s-metrics-adapter.verboseLog=false newrelic/nri-bundle Caution Verbose mode significantly increases the amount of information sent to log files. Enable this mode temporarily, only for troubleshooting purposes, and reset the log level when finished. Get raw metrics Sometimes it's useful to get the list of available metrics and also to get the current value of a specific metric. To get the list of metrics available, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/\" To get the value for a specific metric with a selector, run: bash Copy $ kubectl get --raw \"/apis/external.metrics.k8s.io/v1beta1/namespaces/*/__METRIC_NAME__?labelSelector=_SELECTOR_KEY_=_SELECTOR_VALUE_\" Tip You must replace __METRIC_NAME__, _SELECTOR_KEY_, and _SELECTOR_VALUE_ with your values. Metrics not working There are some common errors that could cause a metric to fail to retrieve the value. These errors are shown in the status of the metrics when you describe the HPA, or are printed when you get the raw metrics directly. executing query: NRQL Syntax Error: Error at line...: The query that is being run has syntax errors. The same error message gives you the executed query and position of the error. You can try this query inside the New Relic query builder and correct the configuration from the adapter. extracting return value: expected first value to be of type \"float64\", got %!q(): The query doesn't return any value. The same error message gives you the executed query so you can try this query inside the New Relic query builder and correct the configuration from the adapter or the match selectors in the HPA.",
- "info": "",
- "_index": "520d1d5d14cc8a32e600034b",
- "_type": "520d1d5d14cc8a32e600034c",
- "_score": 227.9711,
- "_version": null,
- "_explanation": null,
- "sort": null,
- "highlight": {
- "tags": "Kubernetes integration",
- "body": " in the cluster. Installation To install the New Relic Metrics Adapter, we provide the newrelic-k8s-metrics-adapter Helm chart, which is also included in the nri-bundle chart used to deploy all New Relic Kubernetes components. If not already installed, install our Kubernetes integration. 
Upgrade" - }, - "id": "61fd193d196a672daae826d6" } ], "/oma-data-governance/30593cae-4cfb-435c-9170-310e3f38ab40": [ @@ -90871,7 +90791,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 72.73316, + "_score": 72.453186, "_version": null, "_explanation": null, "sort": null, @@ -90906,7 +90826,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 70.14827, + "_score": 69.403625, "_version": null, "_explanation": null, "sort": null, @@ -90952,7 +90872,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 66.9105, + "_score": 66.87514, "_version": null, "_explanation": null, "sort": null, @@ -90994,7 +90914,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 64.11775, + "_score": 63.735867, "_version": null, "_explanation": null, "sort": null, @@ -91036,7 +90956,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 54.477905, + "_score": 54.12998, "_version": null, "_explanation": null, "sort": null, @@ -91079,7 +90999,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 110.50954, + "_score": 102.56169, "_version": null, "_explanation": null, "sort": null, @@ -91121,7 +91041,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.66979, + "_score": 101.7832, "_version": null, "_explanation": null, "sort": null, @@ -91163,7 +91083,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 109.66237, + "_score": 101.777435, "_version": null, "_explanation": null, "sort": null, @@ -91205,7 +91125,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 105.832886, + "_score": 98.38831, "_version": null, "_explanation": null, "sort": null, @@ -91248,7 +91168,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 101.10577, + "_score": 95.21576, "_version": null, "_explanation": null, "sort": null, @@ -91281,7 +91201,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.15608, + "_score": 94.278885, "_version": null, "_explanation": null, "sort": null, @@ -91297,24 +91217,24 @@ "Automate workflows", "Guides to automate workflows", "Quickly tag resources", - "Set up New Relic using the Kubernetes operator", "Automate common tasks", + "Set up New Relic using the Kubernetes operator", "Automatically tag a simple \"Hello World\" Demo across the entire stack", "Getting started with New Relic and Terraform", "Set up New Relic using Helm charts" ], - "published_at": "2022-02-15T01:37:23Z", + "published_at": "2022-02-16T01:38:14Z", "title": "Automate workflows", - "updated_at": "2022-02-15T01:37:23Z", + "updated_at": "2022-02-16T01:38:14Z", "type": "developer", "external_id": "d4f408f077ed950dc359ad44829e9cfbd2ca4871", "document_type": "page", "popularity": 1, - "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. 
These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", + "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 
30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 92.5004, + "_score": 92.15419, "_version": null, "_explanation": null, "sort": null, @@ -91362,7 +91282,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.503174, + "_score": 81.99892, "_version": null, "_explanation": null, "sort": null, @@ -91396,7 +91316,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.496025, + "_score": 81.99311, "_version": null, "_explanation": null, "sort": null, @@ -91441,7 +91361,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.49205, + "_score": 81.98988, "_version": null, "_explanation": null, "sort": null, @@ -91459,31 +91379,31 @@ "Automate workflows", "Guides to automate workflows", "Quickly tag resources", - "Set up New Relic using the Kubernetes operator", "Automate common tasks", + "Set up New Relic using the Kubernetes operator", "Automatically tag a simple \"Hello World\" Demo across the entire stack", "Getting started with New Relic and Terraform", "Set up New Relic using Helm charts" ], - "published_at": "2022-02-15T01:37:23Z", + "published_at": "2022-02-16T01:38:14Z", "title": "Automate workflows", - "updated_at": "2022-02-15T01:37:23Z", + "updated_at": "2022-02-16T01:38:14Z", "type": "developer", "external_id": "d4f408f077ed950dc359ad44829e9cfbd2ca4871", "document_type": "page", "popularity": 1, - "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", + "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. 
In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.14711, + "_score": 94.86789, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "Automate workflows", "sections": "Automate workflows", - "body": " deployment markers 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" + "body": " the Kubernetes operator 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" }, "id": "6091f7c8e7b9d2f6715068f1" }, @@ -91508,7 +91428,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.15608, + "_score": 94.278885, "_version": null, "_explanation": null, "sort": null, @@ -91554,7 +91474,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.503174, + "_score": 81.99892, "_version": null, "_explanation": null, "sort": null, @@ -91588,7 +91508,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.496025, + "_score": 81.99311, "_version": null, "_explanation": null, "sort": null, @@ -91633,7 +91553,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.49205, + "_score": 81.98988, "_version": null, "_explanation": null, "sort": null, @@ -91651,31 +91571,31 @@ "Automate workflows", "Guides to automate workflows", "Quickly tag resources", - "Set up New Relic using the Kubernetes operator", "Automate common tasks", + "Set up New Relic using the Kubernetes operator", "Automatically tag a simple \"Hello World\" Demo across the entire stack", "Getting started with New Relic and Terraform", "Set up New Relic using Helm charts" ], - "published_at": "2022-02-15T01:37:23Z", + "published_at": "2022-02-16T01:38:14Z", "title": "Automate workflows", - "updated_at": "2022-02-15T01:37:23Z", + "updated_at": "2022-02-16T01:38:14Z", "type": "developer", "external_id": "d4f408f077ed950dc359ad44829e9cfbd2ca4871", "document_type": "page", "popularity": 1, - "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. 
This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", + "body": "When building today's complex systems, you want an easy, predictable way to verify that your configuration is defined as expected. This concept, Observability as Code, is brought to life through a collection of New Relic-supported orchestration tools, including Terraform, AWS CloudFormation, and a command-line interface. These tools enable you to integrate New Relic into your existing workflows, easing adoption, accelerating deployment, and returning focus to your main job — getting stuff done. In addition to our Terraform and CLI guides below, find more automation solutions in our Developer Toolkit. Guides to automate workflows Quickly tag resources Add tags to apps for easy filtering 5 min Automate common tasks Use the New Relic CLI to tag apps and create deployment markers 20 min Set up New Relic using the Kubernetes operator Learn how to provision New Relic resources using the Kubernetes operator 20 min Automatically tag a simple \"Hello World\" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 95.14711, + "_score": 94.86789, "_version": null, "_explanation": null, "sort": null, "highlight": { "title": "Automate workflows", "sections": "Automate workflows", - "body": " deployment markers 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" + "body": " the Kubernetes operator 20 min Automatically tag a simple "Hello World" Demo across the entire stack See how easy it is to leverage automation in your DevOps environment! 
30 min Getting started with New Relic and Terraform Learn how to provision New Relic resources using Terraform 30 min Set up New Relic using Helm charts Learn how to set up New Relic using Helm charts 20 min" }, "id": "6091f7c8e7b9d2f6715068f1" }, @@ -91700,7 +91620,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 94.15608, + "_score": 94.278885, "_version": null, "_explanation": null, "sort": null, @@ -91746,7 +91666,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.503174, + "_score": 81.99892, "_version": null, "_explanation": null, "sort": null, @@ -91780,7 +91700,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.496025, + "_score": 81.99311, "_version": null, "_explanation": null, "sort": null, @@ -91825,7 +91745,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 86.49205, + "_score": 81.98988, "_version": null, "_explanation": null, "sort": null, @@ -91889,7 +91809,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 259.03394, + "_score": 242.18958, "_version": null, "_explanation": null, "sort": null, @@ -91929,7 +91849,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 248.0357, + "_score": 232.14998, "_version": null, "_explanation": null, "sort": null, @@ -91986,7 +91906,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 104.92971, + "_score": 105.02027, "_version": null, "_explanation": null, "sort": null, @@ -92025,7 +91945,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 99.57354, + "_score": 99.74337, "_version": null, "_explanation": null, "sort": null, @@ -92056,7 +91976,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 19.778902, + "_score": 19.735983, "_version": null, "_explanation": null, "sort": null, @@ -92231,7 +92151,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 153.14789, + "_score": 144.04303, "_version": null, "_explanation": null, "sort": null, @@ -92273,7 +92193,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 137.37582, + "_score": 127.73767, "_version": null, "_explanation": null, "sort": null, @@ -92284,39 +92204,6 @@ }, "id": "6044151c28ccbc4b4f2c60af" }, - { - "image": "", - "url": "https://docs.newrelic.com/docs/data-apis/custom-data/custom-events/report-custom-event-data/", - "sections": [ - "Report custom events and attributes", - "Requirements", - "Avoid rate limits", - "Example use cases", - "Using custom attributes", - "Using custom events", - "Send custom events and attributes", - "Extend data retention" - ], - "published_at": "2022-02-14T19:23:22Z", - "title": "Report custom events and attributes", - "updated_at": "2022-02-14T19:23:22Z", - "type": "docs", - "external_id": "ff7b6544c9a15b49f77c4d86f69c66949c45cb87", - "document_type": "page", - "popularity": 1, - "body": "One of the ways to report custom data to New Relic is with custom events and attributes. Have questions about why you'd use custom data? See Introduction to custom data. 
Requirements For event and attribute formatting requirements and best practices, see our documentation about data limits and requirements. Avoid rate limits Reporting a large number of custom events and/or attributes can cause degraded query performance. It may also result in approaching or passing data collection rate limits. For optimal performance, first think about what data you want to analyze, and then create only the events and/or attributes necessary to meet these specific goals. Be aware of the following data and subscription requirements for inserting and accessing custom data: Ensure you follow limits and requirements around event/attribute data types, naming syntax, and size. The amount of data you have access to over time depends on your data retention policy. Example use cases Two popular custom data solutions are custom events and custom attributes. There are several ways to accomplish this (more on that later in this doc), depending on your New Relic implementation and tools. Here are some common use cases for implementing custom events and attributes. Using custom attributes Custom attributes are often used to add important business and operational context to existing events. Business context might include: Customer token Customer market segment Customer value classification Workflow control values not obvious in the URIStem User/product/account privilege context Operational context might include: Which feature flags were used What datastore was accessed What cache was accessed What errors were detected and ignored (fault partitioning) Using custom events Event data is one of New Relic's four core data types. We recommend reading that definition to understand what we mean by \"event\" and why that data type is most used for reporting specific types of activity. The use cases for custom events vary widely. Basically they are used for any type of activity that an organization deems important and that is not already being monitored. For example: An event can represent an activity involving multiple actions, like a customer purchasing a certain combination of products. An event can record backup activity. For example, you can set up reporting of events that represent production backups of SOLR instances into an event table, with a timestamp of when it occurred, which cluster, and the duration. Send custom events and attributes Methods for sending custom events and attributes include: Source How to send custom data APM agent Use APM agent APIs to report custom events and custom attributes. Browser monitoring agent Add custom attributes to the PageView event via the browser API call setCustomAttribute. Send PageAction event and attributes via the browser API. Forward APM agent custom attributes to the PageView event. Event API To report custom events not associated with other New Relic features, use the Event API. Infrastructure monitoring agent Add custom attributes to default infrastructure events. Use the Flex integration tool to report your own custom event data. Mobile monitoring agent Use the mobile agent API to send custom events and attributes. Synthetic monitoring Add custom attributes to the SyntheticCheck event via the $util.insights tools. 
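To make the Event API row above concrete, here is a minimal sketch of sending one custom event with curl. YOUR_ACCOUNT_ID and YOUR_LICENSE_KEY are placeholders, BackupEvent and its attributes are hypothetical names modeled on the SOLR backup example earlier, and the host shown is the US-region collector.

```bash
# Sketch only: placeholders and the BackupEvent type are hypothetical.
# EU-region accounts use a different collector host.
curl -X POST "https://insights-collector.newrelic.com/v1/accounts/YOUR_ACCOUNT_ID/events" \
  -H "Content-Type: application/json" \
  -H "Api-Key: YOUR_LICENSE_KEY" \
  -d '[{"eventType":"BackupEvent","cluster":"solr-prod","durationSeconds":120}]'
```

Each object in the array becomes one event; the required eventType attribute determines the event table you can then query with NRQL.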
For ways to report other types of custom data, see: Metric API Log API Trace API Extend data retention To learn how to extend how long events are retained in your account, see our documentation about data retention.", - "info": "", - "_index": "520d1d5d14cc8a32e600034b", - "_type": "520d1d5d14cc8a32e600034c", - "_score": 125.18846, - "_version": null, - "_explanation": null, - "sort": null, - "highlight": { - "body": " with other New Relic features, use the Event API. Infrastructure monitoring agent Add custom attributes to default infrastructure events. Use the Flex integration tool to report your own custom event data. Mobile monitoring agent Use the mobile agent API to send custom events and attributes. Synthetic" - }, - "id": "609fa5fb64441f9ebfd2a1db" - }, { "image": "https://docs.newrelic.com/static/693426d805c82f1d9155cd04b116c36e/d9199/new-relic-product-relationships.png", "url": "https://docs.newrelic.com/docs/style-guide/capitalization/product-capability-feature-usage/", @@ -92342,7 +92229,7 @@ "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 124.418015, + "_score": 118.06818, "_version": null, "_explanation": null, "sort": null, @@ -92353,241 +92240,298 @@ }, { "image": "", - "url": "https://docs.newrelic.com/attribute-dictionary/", + "url": "https://docs.newrelic.com/docs/data-apis/custom-data/custom-events/report-custom-event-data/", "sections": [ - "New Relic data dictionary", - "AjaxRequest", - "AwsLambdaInvocation", - "AwsLambdaInvocationError", - "BrowserInteraction", - "BrowserTiming", - "ContainerSample", - "DistributedTraceSummary", - "InfrastructureEvent", - "JavaScriptError", - "Metric", - "Mobile", - "MobileCrash", - "MobileHandledException", - "MobileRequest", - "MobileRequestError", - "MobileSession", - "NetworkSample", - "NrAiIncident", - "NrAiIncidentExternal", - "NrAuditEvent", - "NrConsumption", - "NrDailyUsage", - "NrIntegrationError", - "NrMTDConsumption", - "NrUsage", - "PageAction", - "PageView", - "PageViewTiming", - "ProcessSample", - "Span", - "StorageSample", - "SyntheticCheck", - "SyntheticRequest", - "SyntheticsPrivateLocationStatus", - "SyntheticsPrivateMinion", - "SystemSample", - "Transaction", - "TransactionError", - "WorkloadStatus" + "Report custom events and attributes", + "Requirements", + "Avoid rate limits", + "Example use cases", + "Using custom attributes", + "Using custom events", + "Send custom events and attributes", + "Extend data retention" ], - "published_at": "2022-02-14T18:05:43Z", - "title": "New Relic data dictionary", - "updated_at": "2022-02-14T18:05:42Z", + "published_at": "2022-02-14T19:23:22Z", + "title": "Report custom events and attributes", + "updated_at": "2022-02-14T19:23:22Z", "type": "docs", - "external_id": "cbca3a897621bcbb31159067d6d4ec27c5178fe4", - "document_type": "views_page_content", + "external_id": "ff7b6544c9a15b49f77c4d86f69c66949c45cb87", + "document_type": "page", "popularity": 1, - "body": "AjaxRequest Data source : Browser agent AjaxRequest events occur for any Ajax request, including during a BrowserInteraction event. The event attribute tracks geographic and browser info. Use browser app settings to block specific requests. Attribute name Definition Data types actionText The text of the HTML element that was clicked when a browser interaction started. Added to BrowserInteraction and any AjaxRequest, JavaScriptError and BrowserTiming events that occurred during that interaction. 
AjaxRequest BrowserTiming JavaScriptError BrowserInteraction appId ID The ID of your application, as recorded by New Relic. PageViewTiming AjaxRequest BrowserTiming PageAction PageView BrowserInteraction appName The name of the application that handled the request as shown in New Relic Browser. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnLatitude The latitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's latitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnLongitude The longitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's longitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnOrganization The organization that owns the Autonomous System Number. Often an ISP, sometimes a private company or institution. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction browserInteractionId ID A unique value generated for each browser interaction captured by the New Relic agent. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction browserInteractionName The name of the interaction. This is either the targetGroupedUrl or the custom name set via the API. AjaxRequest BrowserTiming BrowserInteraction city The city in which the event occurred, such as Portland or Seattle. PageViewTiming AjaxRequest BrowserTiming MobileHandledException PageAction JavaScriptError PageView MobileSession BrowserInteraction countryCode The country from which the browser initiated the page load. For a list of country codes, see ISO 3166-1 alpha-2. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction deviceType The type of device that loaded the page: mobile, tablet, or desktop. AjaxRequest BrowserTiming JavaScriptError PageView BrowserInteraction eventId ID A value that you can link to multiple BrowserInteraction events so you can view the interactions that occurred surrounding a specific event. For example, you can see the browser interactions that occurred prior to a JS error. AjaxRequest BrowserTiming BrowserInteraction groupedPageURL The grouped URL of the view that made the AJAX request. For example: myapp.com/acct/*/dash. AjaxRequest BrowserTiming groupedRequestUrl The grouped URL of the AJAX request. For example: myapp.com/acct/*/ajax. AjaxRequest Span hostname The fully qualified domain name (FQDN) of the request URL. AjaxRequest httpMethod enum The HTTP method of the AJAX request. Example: POST. AjaxRequest httpResponseCode enum The HTTP response code. Example: 200. AjaxRequest jsDuration seconds (s) The total duration, in seconds, spent on JavaScript execution. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. This attribute doesn't exist for initial page load events. AjaxRequest BrowserTiming BrowserInteraction pageUrl The URL of the page that was loaded for the PageView. For example: http://www.newrelic.com. This URL does not include query parameters. 
PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView Span parentEventId ID A unique value generated for each interaction with the page. You can use this value to group interactions together. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction port enum The request port. AjaxRequest Span priority The likelihood that this event will be saved. AjaxRequest regionCode The specific administrative division within a country where the PageView event occurred. In the United States, regions correspond to state codes, such as WA or NY. Outside the United States, a country's regions correspond to numerical codes. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction requestBodySize bytes (B) The payload size of the request body, in bytes. AjaxRequest requestUrl The URL of the AJAX request. For example: myapp.com/acct/1/ajax. AjaxRequest responseBodySize bytes (B) The payload size of the response body, in bytes. AjaxRequest session A unique identifier for a single session. The session cookie expires when the user closes the browser (for example, they fully exit Chrome). A new session identifier will be assigned when the user opens up a new instance of the browser. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction Span timeSinceBrowserInteractionStart seconds (s) The time in seconds between the start of the BrowserInteraction and the start of the request. AjaxRequest BrowserTiming timeToLastCallbackEnd seconds (s) The duration, in seconds, from the start of the request (timestamp) to the end of the last callback. This is not just an additive function; the callback time can overlap with the wait time. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. AjaxRequest BrowserTiming BrowserInteraction timeToLoadEventStart seconds (s) The time, in seconds, from the start of the AJAX request to the start of its load event. This value represents the duration of the AJAX request with single page app (SPA) monitoring. For more information, see the Mozilla developer documentation about XMLHttpRequest load events. AjaxRequest timeToSettle seconds (s) The time, in seconds, from the start of the request to when all resulting callbacks (including callbacks of subsequent AJAX requests) are complete. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. AjaxRequest BrowserTiming timestamp The time (date, hour, minute, second) at which the interaction occurred. PageViewTiming AjaxRequest BrowserTiming PageAction PageView BrowserInteraction Span userAgentName The browser’s name, such as Chrome and Firefox, obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentOS The browser’s reported operating system, such as Windows or Linux, that it is running on. This is obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentVersion The browser’s reported software version, which is obtained from the User-Agent header of an HTTP request. 
PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction AwsLambdaInvocation Data source : AWS Lambda This event is reported by New Relic monitoring for AWS Lambda. This event captures overall function timing and associated metadata. A single AwsLambdaInvocation event is generated for each invocation. Attribute name Definition Data types aws.lambda.arn The Amazon Resource Name (ARN) of the instrumented Lambda function. AwsLambdaInvocation AwsLambdaInvocationError aws.lambda.coldStart A Boolean indicating if the AWS Lambda invocation is a cold start. AwsLambdaInvocation AwsLambdaInvocationError aws.lambda.eventSource.arn The Amazon Resource Name (ARN) of the entity that invoked the instrumented Lambda function. AwsLambdaInvocation AwsLambdaInvocationError aws.requestId ID AWS identifier of the invocation. AwsLambdaInvocation AwsLambdaInvocationError databaseCallCount count The number of database calls made by this transaction. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError databaseDuration seconds (s) The database response time in seconds. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError duration seconds (s) The total invocation time for the transaction, in seconds. (Data source: AWS Lambda) AwsLambdaInvocation AwsLambdaInvocationError externalCallCount count The number of external calls made by this transaction. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError externalDuration seconds (s) The total response time of all external (out-of-process) services, in seconds. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError newRelic.ingestPoint Where the data point entered the platform (such as browser.spans, or api.traces). AwsLambdaInvocation AwsLambdaInvocationError Span parent.account ID If a distributed tracing payload is received, this is the account identifier for the transaction's upstream caller. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError parent.app ID If a distributed tracing payload is received, this is the application identifier. APM agents retrieve this value in the connect response under the key primary_application_id. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError parent.transportType When a distributed tracing payload is received, the method of transport for the payload. Example values: Unknown, HTTP, HTTPS, Kafka, JMS, IronMQ, AMQP, Queue, or Other. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError parent.type If a distributed trace payload was received, the parent's data source type. Example values: App, Browser, Mobile. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.accept The types as read from the HTTP Accept request header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.contentLength bytes (B) Incoming request size in bytes as read from the Content-Length HTTP request header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.contentType Incoming request content-type as read from the HTTP request header Content-Type. Example value: application/octet-stream. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.host The name from the HTTP host request header. 
TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.referer The incoming request referer as read from the Referer request header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.userAgent The contents of the User-Agent HTTP header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.method The HTTP request method used. Example values: POST, GET. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError Span response.headers.contentLength bytes (B) The outgoing response size in bytes as read from the Content-Length response header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError response.headers.contentType For an HTTP response, the data type of the returned response. Example values: text/html, application/json. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError response.status bytes (B) The response code for an HTTP request AwsLambdaInvocation totalTime seconds (s) The sum of all async components' duration, in seconds. An async component is a method or function where there is no instrumented encapsulating method or function. AwsLambdaInvocation Transaction traceId ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra- process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. MobileRequest AwsLambdaInvocation MobileRequestError AwsLambdaInvocationError Span type The New Relic event type. Example values: Transaction, Span. AwsLambdaInvocation AwsLambdaInvocationError AwsLambdaInvocationError Data source : AWS Lambda This event is reported by New Relic monitoring for AWS Lambda. It's generated when an error occurs during a Lambda function invocation. Attribute name Definition Data types aws.lambda.arn The Amazon Resource Name (ARN) of the instrumented Lambda function. AwsLambdaInvocation AwsLambdaInvocationError aws.lambda.coldStart A Boolean indicating if the AWS Lambda invocation is a cold start. AwsLambdaInvocation AwsLambdaInvocationError aws.lambda.eventSource.arn The Amazon Resource Name (ARN) of the entity that invoked the instrumented Lambda function. AwsLambdaInvocation AwsLambdaInvocationError aws.requestId ID AWS identifier of the invocation. AwsLambdaInvocation AwsLambdaInvocationError databaseCallCount count The number of database calls made by this transaction. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError databaseDuration seconds (s) The database response time in seconds. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError duration seconds (s) The total invocation time for the transaction, in seconds. (Data source: AWS Lambda) AwsLambdaInvocation AwsLambdaInvocationError error.class The class name or type for the error. This will be server and platform specific. TransactionError AwsLambdaInvocationError error.message The error message for the transaction. This will be server and platform specific. TransactionError AwsLambdaInvocationError externalCallCount count The number of external calls made by this transaction. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError externalDuration seconds (s) The total response time of all external (out-of-process) services, in seconds. 
TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError newRelic.ingestPoint Where the data point entered the platform (such as browser.spans, or api.traces). AwsLambdaInvocation AwsLambdaInvocationError Span parent.account ID If a distributed tracing payload is received, this is the account identifier for the transaction's upstream caller. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError parent.app ID If a distributed tracing payload is received, this is the application identifier. APM agents retrieve this value in the connect response under the key primary_application_id. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError parent.transportType When a distributed tracing payload is received, the method of transport for the payload. Example values: Unknown, HTTP, HTTPS, Kafka, JMS, IronMQ, AMQP, Queue, or Other. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError parent.type If a distributed trace payload was received, the parent's data source type. Example values: App, Browser, Mobile. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.accept The types as read from the HTTP Accept request header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.contentLength bytes (B) Incoming request size in bytes as read from the Content-Length HTTP request header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.contentType Incoming request content-type as read from the HTTP request header Content-Type. Example value: application/octet-stream. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.host The name from the HTTP host request header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.referer The incoming request referer as read from the Referer request header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.headers.userAgent The contents of the User-Agent HTTP header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError request.method The HTTP request method used. Example values: POST, GET. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError Span response.headers.contentLength bytes (B) The outgoing response size in bytes as read from the Content-Length response header. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError response.headers.contentType For an HTTP response, the data type of the returned response. Example values: text/html, application/json. TransactionError AwsLambdaInvocation Transaction AwsLambdaInvocationError stackTrace The error stack trace. The format will be different depending on the agent language. AwsLambdaInvocationError traceId ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra- process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. MobileRequest AwsLambdaInvocation MobileRequestError AwsLambdaInvocationError Span transactionName Name of the transaction in which the error occurred. Example value: Controller/customers/show. Value may be 'Unknown' if an error occurs outside of a transaction. TransactionError AwsLambdaInvocationError type The New Relic event type. Example values: Transaction, Span. 
AwsLambdaInvocation AwsLambdaInvocationError BrowserInteraction Data source : Browser agent A BrowserInteraction represents a unit of work in a browser session, triggered by a user interacting with the webpage. It captures information about the session, AJAX calls and custom JavaScript timing that occurred as a result of the interaction. Initial load and route changes are captured as special types of browser interactions, and are used for SPA monitoring. Attribute name Definition Data types actionText The text of the HTML element that was clicked when a browser interaction started. Added to BrowserInteraction and any AjaxRequest, JavaScriptError and BrowserTiming events that occurred during that interaction. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction ajaxCount count A count of all XHRs included in the timing of a SPA interaction. BrowserInteraction appId ID The ID of your application, as recorded by New Relic. PageViewTiming AjaxRequest BrowserTiming PageAction PageView BrowserInteraction appName The name of the application that handled the request as shown in New Relic Browser. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnLatitude The latitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's latitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnLongitude The longitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's longitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnOrganization The organization that owns the Autonomous System Number. Often an ISP, sometimes a private company or institution. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction backendTransactionName The name of the backend transaction that served the initial page load. BrowserInteraction browserInteractionId ID A unique value generated for each browser interaction captured by the New Relic agent. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction browserInteractionName The name of the interaction. This is either the targetGroupedUrl or the custom name set via the API. AjaxRequest BrowserTiming BrowserInteraction category The type of interaction; either initial page load, route change, or custom. BrowserInteraction city The city in which the event occurred, such as Portland or Seattle. PageViewTiming AjaxRequest BrowserTiming MobileHandledException PageAction JavaScriptError PageView MobileSession BrowserInteraction countryCode The country from which the browser initiated the page load. For a list of country codes, see ISO 3166-1 alpha-2. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction deviceType The type of device that loaded the page: mobile, tablet, or desktop. AjaxRequest BrowserTiming JavaScriptError PageView BrowserInteraction domain The domain portion of the request URL. 
PageViewTiming JavaScriptError PageView BrowserInteraction duration seconds (s) The total elapsed time of the interaction event. BrowserInteraction eventId ID A value that you can link to multiple BrowserInteraction events so you can view the interactions that occurred surrounding a specific event. For example, you can see the browser interactions that occurred prior to a JS error. AjaxRequest BrowserTiming BrowserInteraction firstContentfulPaint firstContentfulPaint is the point when the browser renders the first bit of content from the DOM, which may be text, an image, SVG, or a <canvas> element. Google's User-centric Performance Metrics contains detailed information about its Paint Timing API and firstContentfulPaint. See Compatibility and requirements for New Relic Browser for additional information about firstContentfulPaint browser compatibility. PageView BrowserInteraction firstPaint firstPaint marks the point when the browser renders anything that is visually different from what was on the screen prior to navigation. This includes non-default background paint and the enclosing box of an iframe. Google's User-centric Performance Metrics contains detailed information about its Paint Timing API and firstPaint. See Compatibility and requirements for New Relic Browser for additional information about firstPaint browser compatibility. PageView BrowserInteraction jsDuration seconds (s) The total duration, in seconds, spent on JavaScript execution. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. This attribute doesn't exist for initial page load events. AjaxRequest BrowserTiming BrowserInteraction monitorAccountId The Synthetics account from which you are running the monitor. JavaScriptError BrowserInteraction monitorId ID A unique number identifying a particular monitor. SyntheticCheck JavaScriptError BrowserInteraction monitorJobId ID The ID of a single Synthetics monitor run, which began at a specific time and originated from a specific location. JavaScriptError BrowserInteraction parentEventId ID A unique value generated for each interaction with the page. You can use this value to group interactions together. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction previousGroupedUrl The grouped version of the URL in the browser at the start of the interaction. BrowserInteraction previousRouteName The route name of the page at the start of the interaction. This is the last value passed by setCurrentRouteName before the start of the interaction. BrowserInteraction previousURL The ungrouped URL in the browser at the start of the interaction. BrowserInteraction regionCode The specific administrative division within a country where the PageView event occurred. In the United States, regions correspond to state codes; for example, WA or NY. Outside the United States, a country's regions correspond to numerical codes. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction session A unique identifier for a single session. The session cookie expires when the user closes the browser (for example, they fully exit Chrome). A new session identifier will be assigned when the user opens up a new instance of the browser.
PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction Span targetGroupedUrl The grouped version of the URL in the browser at the end of the interaction. BrowserInteraction targetRouteName The route name for the page at the end of the interaction. The last value passed by setCurrentRouteName before the end of the interaction. BrowserInteraction targetUrl The ungrouped URL in the browser at the end of the interaction. BrowserInteraction timeToConnectEnd seconds (s) The time, in seconds, from the start of the interaction to the connectEnd, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToConnectStart seconds (s) The time, in seconds, from the start of the interaction to the connectStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToDomComplete seconds (s) The time, in seconds, from the start of the interaction to the domComplete, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToDomContentLoadedEventEnd seconds (s) The time, in seconds, from the start of the interaction to the domContentLoadedEventEnd, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToDomContentLoadedEventStart seconds (s) The time, in seconds, from the start of the interaction to the domContentLoadedEventStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToDomInteractive seconds (s) The time, in seconds, from the start of the interaction to the domInteractive, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToDomLoading seconds (s) The time, in seconds, from the start of the interaction to the domLoading, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToDomainLookupEnd seconds (s) The time, in seconds, from the start of the interaction to the domainLookupEnd, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToDomainLookupStart seconds (s) The time, in seconds, from the start of the interaction to the domainLookupStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. 
For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToFetchStart seconds (s) The time, in seconds, from the start of the interaction to the fetchStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToLastCallbackEnd seconds (s) The duration, in seconds, from the start of the request (timestamp) to the end of the last callback. This is not just an additive function; the callback time can overlap with the wait time. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. AjaxRequest BrowserTiming BrowserInteraction timeToLoadEventEnd seconds (s) The time, in seconds, from the start of the interaction to the loadEventEnd, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToLoadEventStart seconds (s) The time, in seconds, from the start of the interaction to the loadEventStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information, see our documentation about instrumentation for the Navigation Timing API. BrowserInteraction timeToRedirectEnd seconds (s) The time, in seconds, from the start of the interaction to the redirectEnd, as defined by the Navigation Timing API. This attribute exists only for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToRedirectStart seconds (s) The time, in seconds, from the start of the interaction to the redirectStart, as defined by the Navigation Timing API. This attribute exists only for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToRequestStart seconds (s) The time, in seconds, from the start of the interaction to the requestStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToResponseEnd seconds (s) The time, in seconds, from the start of the interaction to the responseEnd, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToResponseStart seconds (s) The time, in seconds, from the start of the interaction to the responseStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToSecureConnectionStart seconds (s) The time, in seconds, from the start of the interaction to the secureConnectionStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. 
BrowserInteraction timeToUnloadEventEnd seconds (s) The time, in seconds, from the start of the interaction to the unloadEventEnd, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timeToUnloadEventStart seconds (s) The time, in seconds, from the start of the interaction to the unloadEventStart, as defined by the Navigation Timing API. This attribute exists for initial page load events, not route changes. For more information about the Navigation Timing API, see Instrumentation for Browser monitoring. BrowserInteraction timestamp The time (date, hour, minute, second) at which the interaction occurred. PageViewTiming AjaxRequest BrowserTiming PageAction PageView BrowserInteraction Span trigger The cause of the route change or page load. The default values are click, submit, popstate, or initial page load. For a custom event created with the API, the default value for trigger will be api. This value can also be set via the API. BrowserInteraction userAgentName The browser's name, such as Chrome or Firefox, obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentOS The operating system, such as Windows or Linux, that the browser reports it is running on. This is obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentVersion The browser's reported software version, which is obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction
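As an illustration (not part of the original table; the category literal follows the casing shown above, though the exact stored value may differ, and the window is arbitrary), a NRQL query like this would show the average duration of initial page loads by interaction name:
SELECT average(duration) FROM BrowserInteraction WHERE category = 'initial page load' FACET browserInteractionName SINCE 1 day ago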
BrowserTiming Data source: Browser agent BrowserTiming is a custom event that captures SPA timing data for browser interactions started using the custom createTracer SPA API method. BrowserTiming contains many of the same attributes used by other events, especially AjaxRequest. Attribute name Definition Data types actionText The text of the HTML element that was clicked when a browser interaction started. Added to BrowserInteraction and any AjaxRequest, JavaScriptError and BrowserTiming events that occurred during that interaction. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction appId ID The ID of your application, as recorded by New Relic. PageViewTiming AjaxRequest BrowserTiming PageAction PageView BrowserInteraction appName The name of the application that handled the request as shown in New Relic Browser. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnLatitude The latitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's latitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnLongitude The longitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's longitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnOrganization The organization that owns the Autonomous System Number. Often an ISP, sometimes a private company or institution. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction browserInteractionId ID A unique value generated for each browser interaction captured by the New Relic agent. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction browserInteractionName The name of the interaction. This is either the targetGroupedUrl or the custom name set via the API. AjaxRequest BrowserTiming BrowserInteraction browserTimingName The name of the event. This is taken from the name argument of createTracer. BrowserTiming city The city in which the event occurred, such as Portland or Seattle. PageViewTiming AjaxRequest BrowserTiming MobileHandledException PageAction JavaScriptError PageView MobileSession BrowserInteraction countryCode The country from which the browser initiated the page load. For a list of country codes, see ISO 3166-1 alpha-2. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction deviceType The type of device that loaded the page: mobile, tablet, or desktop. AjaxRequest BrowserTiming JavaScriptError PageView BrowserInteraction eventId ID A value that you can link to multiple BrowserInteraction events so you can view the interactions that occurred surrounding a specific event. For example, you can see the browser interactions that occurred prior to a JS error. AjaxRequest BrowserTiming BrowserInteraction groupedPageURL The grouped URL of the view that made the AJAX request. For example: myapp.com/acct/*/dash. AjaxRequest BrowserTiming jsDuration seconds (s) The total duration, in seconds, spent on JavaScript execution. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. This attribute doesn't exist for initial page load events. AjaxRequest BrowserTiming BrowserInteraction pageUrl The URL of the page that was loaded for the PageView. For example: http://www.newrelic.com. This URL does not include query parameters. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView Span parentEventId ID A unique value generated for each interaction with the page. You can use this value to group interactions together. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction regionCode The specific administrative division within a country where the PageView event occurred. In the United States, regions correspond to state codes; for example, WA or NY. Outside the United States, a country's regions correspond to numerical codes. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction session A unique identifier for a single session. The session cookie expires when the user closes the browser (for example, they fully exit Chrome). A new session identifier will be assigned when the user opens up a new instance of the browser. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction Span timeSinceBrowserInteractionStart seconds (s) The time in seconds between the start of the BrowserInteraction and the start of the request.
AjaxRequest BrowserTiming timeToLastCallbackEnd seconds (s) The duration, in seconds, from the start of the request (timestamp) to the end of the last callback. This is not just an additive function; the callback time can overlap with the wait time. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. AjaxRequest BrowserTiming BrowserInteraction timeToSettle seconds (s) The time, in seconds, from the start of the request to when all resulting callbacks (including callbacks of subsequent AJAX requests) are complete. This attribute is only seen in AjaxRequest data that is tied to BrowserInteraction. AjaxRequest BrowserTiming timeToTracedCallbackStart seconds (s) The time in seconds from the start of the custom tracer until the start of the traced callback. This attribute is unique to the BrowserTiming event. BrowserTiming timestamp The time (date, hour, minute, second) at which the interaction occurred. PageViewTiming AjaxRequest BrowserTiming PageAction PageView BrowserInteraction Span tracedCallbackDuration seconds (s) The duration in seconds of the traced callback. This attribute is unique to the BrowserTiming event. BrowserTiming userAgentName The browser's name, such as Chrome or Firefox, obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentOS The operating system, such as Windows or Linux, that the browser reports it is running on. This is obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentVersion The browser's reported software version, which is obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction
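For illustration only (the attribute names come from this table; the percentile and window are arbitrary), a NRQL query such as the following would summarize custom-traced callback durations by tracer name:
SELECT percentile(tracedCallbackDuration, 95) FROM BrowserTiming FACET browserTimingName SINCE 1 week ago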
ContainerSample Data source: Infrastructure This event is reported by the New Relic Infrastructure agent. It collects data from all the Docker containers on the host (which may or may not be running). It includes the container's ID, name, image, image name, and metrics about CPU, memory, and networking. We take a snapshot of this data every 15 seconds for each container and package it into this event, which is then sent to New Relic. This data appears on the Containers UI page. Attribute name Definition Data types StorageDataAvailableBytes bytes (B) Data space available in the Storage Driver. Only Device Mapper driver is supported. ContainerSample StorageDataTotalBytes bytes (B) Total Data space in the Storage Driver. Only Device Mapper driver is supported. ContainerSample StorageDataUsagePercent percentage (%) Percent of Data space used in the Storage Driver. Only Device Mapper driver is supported. ContainerSample StorageDataUsedBytes bytes (B) Data space used by the Storage Driver. Only Device Mapper driver is supported. ContainerSample StorageMetadataAvailableBytes bytes (B) Metadata space available in the Storage Driver. Only Device Mapper driver is supported. ContainerSample StorageMetadataTotalBytes bytes (B) Total Metadata space in the Storage Driver. Only Device Mapper driver is supported. ContainerSample StorageMetadataUsagePercent percentage (%) Percent of Metadata space used in the Storage Driver. Only Device Mapper driver is supported. ContainerSample StorageMetadataUsedBytes bytes (B) Metadata space used by the Storage Driver. Only Device Mapper driver is supported. ContainerSample commandLine The command line used in the container. ContainerSample containerId ID The unique Docker container ID. ContainerSample cpuKernelPercent percentage (%) CPU time percentage used in kernel space. ContainerSample cpuLimitCores count Number of cores available for the container. ContainerSample cpuPercent percentage (%) CPU usage percentage. ContainerSample cpuShares count Number of CPU shares assigned to the container. ContainerSample cpuThrottlePeriods count Total number of periods throttled. ContainerSample cpuThrottleTimeMs milliseconds (ms) Total throttling time in milliseconds. ContainerSample cpuUsedCores percentage (%) CPU usage per core. ContainerSample cpuUsedCoresPercent percentage (%) CPU usage percentage per core. ContainerSample cpuUserPercent percentage (%) CPU time percentage used in user space. ContainerSample criticalViolationCount count The number of times that alert conditions violated critical thresholds, causing critical violations and opening incidents. If this attribute does not exist on the sample, it has zero violations. ProcessSample StorageSample NetworkSample ContainerSample SystemSample image ID The Docker image ID for the image the container is based on. ContainerSample imageName The Docker image name for the container. ContainerSample label_KEY Docker labels associated with this container (where KEY represents a custom label's key value). ContainerSample memoryCacheBytes bytes (B) The amount of memory used by the container that can be associated precisely with a block on a block device. ContainerSample memoryKernelUsageBytes bytes (B) The amount of current kernel memory allocation. ContainerSample memoryResidentSizeBytes bytes (B) The amount of memory that doesn't correspond to anything on disk: stacks, heaps, and anonymous memory maps. ContainerSample memorySizeLimitBytes bytes (B) The total amount of memory the container is allowed to use. ContainerSample memorySoftLimitBytes bytes (B) The soft limit of memory usage, equivalent to the memory reservation of the container. ContainerSample memorySwapLimitBytes bytes (B) The total amount of memory, including swap, that the container is allowed to use. ContainerSample memorySwapLimitUsagePercent percentage (%) This metric is calculated as the percentage of memorySwapUsageBytes over memorySwapLimitBytes, if the limit exists. ContainerSample memorySwapOnlyUsageBytes bytes (B) The amount of swap memory the container is using. This memory doesn't include non-swap memory. ContainerSample memorySwapUsageBytes bytes (B) The amount of memory the container is using, including swap. ContainerSample memoryUsageBytes bytes (B) The amount of memory the container is using. This metric doesn't account for swap usage. ContainerSample memoryUsageLimitPercent percentage (%) This metric is calculated as the percentage of memoryUsageBytes over memorySizeLimitBytes, if the limit exists. ContainerSample name The Docker container name. ContainerSample networkRxBytes bytes (B) Total number of received bytes. ContainerSample networkRxBytesPerSecond rate Number of received bytes per second. ContainerSample networkRxDropped count Total number of received packets dropped. ContainerSample networkRxDroppedPerSecond rate Number of received packets dropped per second. ContainerSample networkRxErrors count Total number of received packets with errors. ContainerSample networkRxErrorsPerSecond rate Number of received packets with errors per second. ContainerSample networkRxPackets count Total number of received packets. ContainerSample networkRxPacketsPerSecond rate Number of received packets per second.
ContainerSample networkTxBytesPerSecond rate Number of transmitted bytes per second. ContainerSample networkTxDropped count Total number of transmitted packets dropped. ContainerSample networkTxDroppedPerSecond rate Number of transmitted packets dropped per second. ContainerSample networkTxErrors count Total number of transmitted packets with errors. ContainerSample networkTxErrorsPerSecond rate Number of transmitted packets with errors per second. ContainerSample networkTxPackets count Total number of transmitted packets. ContainerSample networkTxPacketsPerSecond rate Number of transmitted packets per second. ContainerSample networkTxBytes bytes (B) Total number of transmitted bytes. ContainerSample restartCount count The number of times the container was restarted. ContainerSample state The state of the container: created, restarting, running, removing, paused, exited, or dead. ContainerSample status The current container status. ContainerSample warningViolationCount count The number of times that alert conditions violated warning thresholds, causing warning violations. ProcessSample StorageSample NetworkSample ContainerSample SystemSample
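As a hypothetical example (the attribute names are from this table; the window is arbitrary), a NRQL query like this would show the latest CPU and memory usage per container name:
SELECT latest(cpuPercent), latest(memoryUsageBytes) FROM ContainerSample FACET name SINCE 30 minutes ago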
DistributedTraceSummary Data source: Distributed Tracing This event contains summary data about a distributed trace and provides an aggregated view of distributed tracing data. DistributedTraceSummary events are generated by New Relic and are triggered by distributed tracing data from agents or the Trace API. Attribute name Definition Data types accountIds A comma-delimited list of New Relic account IDs that took part in this trace. DistributedTraceSummary backend.duration.ms milliseconds (ms) The total elapsed time in milliseconds of all backend services in this trace. DistributedTraceSummary backend.timestamp milliseconds (ms) The timestamp of the first span in this trace from a backend entity. In distributed tracing, any events that are not from client-side applications contribute to the backend duration. DistributedTraceSummary duration.ms milliseconds (ms) The duration of the entire distributed trace, including both backend and client-side entities, from the earliest span to the latest. DistributedTraceSummary entityCount count The number of unique entities that took part in this trace. DistributedTraceSummary entityGuids A comma-delimited list of entity GUIDs for entities that participated in this trace. These GUIDs are assigned by New Relic for the New Relic-monitored entity (host, application, etc.). Each GUID is stored as a Base64-encoded value. DistributedTraceSummary errorCount count The number of events in this distributed trace that were identified as errors. DistributedTraceSummary newRelic.traceFilter.type The name of the trace filter used by the Infinite Tracing trace observer to select this trace. DistributedTraceSummary Span root.entity.accountId The New Relic account ID that the root entity of this trace reports data to. DistributedTraceSummary root.entity.guid The entity GUID associated with the root entity of this trace. DistributedTraceSummary root.entity.name The name of the root entity of this trace. DistributedTraceSummary root.span.duration.ms milliseconds (ms) The elapsed time in milliseconds of the root span of this trace. The root of a distributed trace is the first span, and will have a null value for parent.id. DistributedTraceSummary root.span.id The unique identifier of the root span of this trace. The root of a distributed trace is the first span, and it has a null value for parent.id. DistributedTraceSummary root.span.name The name of the root span of this trace. DistributedTraceSummary root.span.timestamp milliseconds (ms) The timestamp of the root span of this trace. The root of a distributed trace is the first span, and will have a null value for parent.id. DistributedTraceSummary spanCount count The number of events in this distributed trace. Events in a distributed trace can have several event types, including Span, Transaction, and TransactionError. DistributedTraceSummary timestamp milliseconds (ms) The timestamp of the root span in this distributed trace. DistributedTraceSummary trace.id ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra-process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. TransactionError DistributedTraceSummary MobileRequest MobileRequestError Transaction Span
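By way of illustration (not from the original reference; the threshold and window are arbitrary), a NRQL query along these lines would count traces containing errors by root entity:
SELECT count(*) FROM DistributedTraceSummary WHERE errorCount > 0 FACET root.entity.name SINCE 1 day ago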
InfrastructureEvent Data sources: Infrastructure, CloudTrail InfrastructureEvent describes changes (deltas) that occur in a system's live state. When an inventory or system state is added, removed, or changed, Infrastructure will produce an InfrastructureEvent that logs that activity. Attribute name Definition Data types category A New Relic category used to organize events in the UI. For example: automation, notification, and service. InfrastructureEvent changeType A simple classification of the type of change made to the entity: added, modified, or removed. InfrastructureEvent changedPath The fully specified name of the item that changed. This is constructed by taking the source and adding one or more additional path elements that uniquely identify the item that changed. InfrastructureEvent deltaId Delta refers to a recorded change in the system. The deltaId is a number used by New Relic to organize incoming inventory change data. InfrastructureEvent eventId The unique ID of the event, generated by New Relic. InfrastructureEvent format The type of infrastructure event. Each format type includes attributes that may be used to render the event in the UI. InfrastructureEvent newStatus The new agent status: disconnected or connected. InfrastructureEvent newValue If a change is made to the entity, this attribute contains the new value of the inventory that was changed. This will have no value if no change has been made. The value will display approximately 4K bytes of data. InfrastructureEvent oldValue If a change is made to the entity, this attribute contains the old value of the inventory that was changed. This will be blank if no change has been made. InfrastructureEvent provider For integrations that use generic event types (like the DatastoreSample event), the provider value specifies the source of the data (the service, or a sub-category of data from that service). Some Insights events are generic and are used by several integrations. For example, the DatastoreSample event is used by several integrations, including the AWS DynamoDB integration and the AWS RDS integration. In these cases, the provider attribute value represents the source of that attribute. This will usually be the service that data comes from or, for integrations that use several provider values, a certain sub-category of data from that service. When a provider value is present for a generic event, that event will have additional integration-specific attributes attached to it. Here's an example of an Insights NRQL query that returns the attributes present for a DatastoreSample event reported by the AWS RDS integration: SELECT * from DatastoreSample where provider = 'RdsDbCluster' InfrastructureEvent source The fully specified origin of this inventory item. This is typically in the form category/plugin, where plugin is the generic word used for the tool that gathered this data. InfrastructureEvent summary A summary of the change that happened. Uses a human-friendly string, such as Agent disconnected. InfrastructureEvent violationUpdateType The type of change to the violation. For example: open or closed. InfrastructureEvent
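As an illustrative sketch (the 'modified' literal is one of the changeType values documented above; the window is arbitrary), a NRQL query like this would count recent inventory modifications by category:
SELECT count(*) FROM InfrastructureEvent WHERE changeType = 'modified' FACET category SINCE 1 day ago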
JavaScriptError Data source: Browser agent As JavaScript errors are triggered, we capture details as events. The JavaScriptError event contains information to help you segment errors to understand how they impact performance. Attribute name Definition Data types actionText The text of the HTML element that was clicked when a browser interaction started. Added to BrowserInteraction and any AjaxRequest, JavaScriptError and BrowserTiming events that occurred during that interaction. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction appId ID The identification number for the reporting browser agent. JavaScriptError appName The name of the application that handled the request as shown in New Relic Browser. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnLatitude The latitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's latitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnLongitude The longitude of the geographic center of the postal code where the Autonomous System Network is registered. This is not the end user's longitude. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction asnOrganization The organization that owns the Autonomous System Number. Often an ISP, sometimes a private company or institution. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction browserInteractionId ID A unique value generated for each browser interaction captured by the New Relic agent. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction browserStackHash ID A unique identifier generated for a stack trace. The browserStackHash for a stack trace is different across different browsers. An identical stack trace will generate the same identifier. JavaScriptError city The city in which the event occurred, such as Portland or Seattle. PageViewTiming AjaxRequest BrowserTiming MobileHandledException PageAction JavaScriptError PageView MobileSession BrowserInteraction countryCode The country from which the browser initiated the page load. For a list of country codes, see ISO 3166-1 alpha-2. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction deviceType The type of device that loaded the page: mobile, tablet, or desktop. AjaxRequest BrowserTiming JavaScriptError PageView BrowserInteraction domain The domain portion of the request URL. PageViewTiming JavaScriptError PageView BrowserInteraction entityGuid The unique identifier of the monitor referenced in New Relic One. SyntheticCheck SyntheticRequest JavaScriptError errorClass The error type of the JavaScript Error object. Examples: ReferenceError, SyntaxError, and UncaughtException. JavaScriptError errorMessage The error message that was delivered. JavaScriptError firstErrorInSession A value to indicate whether or not this was the first JS error in the session. Example: true. JavaScriptError monitorAccountId The Synthetics account from which you are running the monitor. JavaScriptError BrowserInteraction monitorId ID A unique number identifying a particular monitor. SyntheticCheck JavaScriptError BrowserInteraction monitorJobId ID The ID of a single Synthetics monitor run, which began at a specific time and originated from a specific location. JavaScriptError BrowserInteraction pageUrl The URL of the page that was loaded for the PageView. For example: http://www.newrelic.com. This URL does not include query parameters. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView Span parentEventId ID A unique value generated for each interaction with the page. You can use this value to group interactions together. AjaxRequest BrowserTiming JavaScriptError BrowserInteraction regionCode The specific administrative division within a country where the PageView event occurred. In the United States, regions correspond to state codes; for example, WA or NY. Outside the United States, a country's regions correspond to numerical codes. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction releaseIds ID The releases in which the app was updated. Example: {"jQuery":"v3.1.1","multiverse":"96e9ac7"}. JavaScriptError requestUri The URI of the requested resource. JavaScriptError session A unique identifier for a single session. The session cookie expires when the user closes the browser (for example, they fully exit Chrome). A new session identifier will be assigned when the user opens up a new instance of the browser. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction Span stackHash ID A unique identifier generated by the Browser agent for a stack trace. The stackHash for a stack trace is the same across different browsers. An identical stack trace will generate the same identifier. JavaScriptError stackTrace A collection of the active stack frames when the error occurred. JavaScriptError stackTraceGzip A compressed version of the stackTrace attribute. JavaScriptError timestamp The time that the error occurred, in Unix time. JavaScriptError transactionName The full metric name of the transaction in which the error occurred, or Unknown if the error occurs outside of a transaction. JavaScriptError userAgentName The browser's name, such as Chrome or Firefox, obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentOS The operating system, such as Windows or Linux, that the browser reports it is running on. This is obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction userAgentVersion The browser's reported software version, which is obtained from the User-Agent header of an HTTP request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction
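For example (illustrative only; the facets and window are arbitrary choices, not part of the original reference), a NRQL query such as this would surface the most common JavaScript errors:
SELECT count(*) FROM JavaScriptError FACET errorClass, errorMessage SINCE 1 day ago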
Metric Data source: Metrics Represents a metric data point (a measurement over a range of time, or a sample at a specific point in time) with multiple attributes attached, which allow for in-depth analysis and querying. This metric data comes from our Metric API, our Telemetry SDKs, network performance monitoring, and some of our open-source exporters/integrations. Attribute name Definition Data types endTimestamp milliseconds (ms) The end of the time range associated with the metric, in Unix time, in milliseconds. This is calculated by adding the metric interval to the timestamp of the metric (timestamp + interval.ms). Metric interval.ms milliseconds (ms) The length of the time window. Metric metricName Name of the metric. Metric newrelic.source The source of this data. For example: metricAPI. Metric timestamp milliseconds (ms) The start time for the metric, in Unix time. Metric
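As an illustration (not part of the original table; the window is arbitrary), a NRQL query like this would list the metric names currently being reported:
SELECT uniques(metricName) FROM Metric SINCE 30 minutes ago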
Mobile Data source: Mobile A Mobile event is created when a crash occurs, when an interaction ends or has run for 1 second, or if a session completes after the app is closed, backgrounded, or has run for 10 minutes. Mobile events were once the only mobile event type and were generated for all activity, but now there are several specialized event types. Recommendation: Upgrade to the most recent mobile monitoring agent version to take full advantage of the new event types. Attribute name Definition Data types category The type of data, either session or interaction. MobileSession Mobile interactionDuration For interaction category events only. This is the total time for the interaction to render on the device. In addition to render time, this usually includes all external calls associated with the interaction. Currently, this attribute is measured in seconds for Android devices and in milliseconds for iOS devices. Mobile name For interaction category events only. This is the label of the interaction associated with the event. It is by default assigned by New Relic. For example: ApplicationsListFragment or Display iOS_Example.MasterViewController. Mobile reportedTimestampMs For interaction category events only. The UTC-based timestamp for when the event was sent to New Relic. This is different from the attribute 'timestamp', which is when the event began. Mobile
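For illustration (the 'interaction' literal follows the lowercase value documented above, though the exact stored casing may differ; the window is arbitrary), a NRQL query along these lines would count interaction events by interaction name:
SELECT count(*) FROM Mobile WHERE category = 'interaction' FACET name SINCE 1 day ago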
MobileCrash Data source: Mobile The MobileCrash event is created when an app crashes. MobileCrash includes attributes such as crash line number, class, and crash message. Attribute name Definition Data types appBuild Indicates the technical build number of the app binary. As a developer, you can use this attribute to identify specific builds of your app. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appId ID A unique identifier for a monitored app, based on the app token. For example: 35091. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appName The name of the monitored app. For example: My Mobile App - iOS. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appToken The mobile application license token. MobileCrash appVersion The version number of the monitored app. For example: 2.2.9. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersionId ID An identifier for the specific version of the app. For example: 1713477. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession architecture The processor architecture of the device. For example: armv7 or arm64. MobileCrash asnOwner The telecom owner of the ASN. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession bundleId ID The unique string used to identify the application. MobileCrash MobileSession carrier The network over which the app transferred data, such as Wi-Fi, Verizon, or Sprint. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession crashException The exception associated with the crash, if one is present. For example: java.lang.NullPointerException. MobileCrash crashFingerprint ID The New Relic-generated fingerprint used to uniquely identify the crash and other crashes identical to this one. MobileCrash crashLocationFile The file in which the crash occurred. MobileCrash crashMessage The message associated with the crash, if one is present. MobileCrash deviceManufacturer The manufacturer of the device, such as Motorola or HTC. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceModel The model number of the device, such as XT1039 or SM-G900F. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceName The device's name. MobileCrash deviceUuid ID A unique identifier assigned at the time of app installation by New Relic. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of uuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession diskAvailable bytes (B) Space available on the device, in bytes. MobileCrash interactionHistory The client interactions with the application that led to the crash. MobileCrash isFirstOccurrence A boolean value indicating whether or not this was the first occurrence of the crash. MobileCrash lastInteraction The last interaction before a crash or harvest event, if one is present. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession memUsageMb megabytes (MB) The total amount of memory, in MB, used by the application. Updated every 60 seconds. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession modelNumber The model of the device. This is the same as the session-level deviceModel attribute. MobileCrash networkStatus The type of network that the device was on at the time of crash, such as wifi or LTE. MobileCrash newRelicVersion The version number of the agent running on the application. For example: 4.232.0. Duplicate of newRelicAgentVersion. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession occurrenceId ID The ID for this instance of the crash. MobileCrash orientation The orientation of the device, such as landscape or portrait. MobileCrash osBuild For Android only. The specific build of the Android OS. MobileCrash MobileHandledException osMajorVersion The simplified version number of the app's host operating system, such as iOS 11, as compared to iOS 11.0.4. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osName The name of the app's host operating system, for example, iOS or Android. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osVersion The exact version number of the app's host operating system, such as iOS 11.0.4, as compared to iOS 11. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession parentProcess The parent process that launched the crashing process. MobileCrash parentProcessId ID The parent identification number (PID) of the parent process. MobileCrash platform The platform type of the mobile monitoring agent, such as native or Cordova. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession processId ID The PID of the previously running process on the device. MobileCrash processName The name of the previously running process. MobileCrash processPath The path to the binary. MobileCrash reportedTimestampMs The UTC timestamp for when the event was received by New Relic. (This is different from timestamp, which is when the MobileSession event began that crashed.) MobileCrash runTime For Android only. The Android Runtime version where the exception/crash was generated. MobileCrash MobileHandledException sessionCrashed A boolean value indicating whether or not that session crashed. MobileCrash MobileSession sessionId ID A unique identifier for a single user session. A new sessionId is created each time the app is brought into the foreground. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession symbolicated A boolean value indicating whether or not the crash was properly symbolicated. MobileCrash timeSinceLastInteraction milliseconds (ms) The time, in milliseconds, since the interaction before a crash event. MobileCrash userImageUuids ID The array of build UUIDs for applications and libraries. MobileCrash uuid ID A unique identifier assigned by New Relic for a specific app on a particular device. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of deviceUuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession
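As a hypothetical example (the attribute names come from this table; the window is arbitrary), a NRQL query like this would count crashes by exception and app version:
SELECT count(*) FROM MobileCrash FACET crashException, appVersion SINCE 1 week ago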
MobileHandledException Data source: Mobile MobileHandledException is sent when an exception is caught and is used for non-fatal exceptions reported to New Relic using the recordHandledException API for Android or iOS. Attribute name Definition Data types appBuild Indicates the technical build number of the app binary. As a developer, you can use this attribute to identify specific builds of your app. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appId ID A unique identifier for a monitored app, based on the app token. For example: 35091. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appName The name of the monitored app. For example: My Mobile App - iOS. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersion The version number of the monitored app. For example: 2.2.9. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersionId ID An identifier for the specific version of the app. For example: 1713477. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnOwner The telecom owner of the ASN. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession carrier The network over which the app transferred data, such as Wi-Fi, Verizon, or Sprint. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession city The city in which the event occurred, such as Portland or Seattle. PageViewTiming AjaxRequest BrowserTiming MobileHandledException PageAction JavaScriptError PageView MobileSession BrowserInteraction countryCode The country from which the device ran the application. For a list of country codes, see ISO 3166-1 alpha-2. MobileRequest MobileHandledException MobileRequestError MobileSession device The specific type of the device: iPhone 8, iPad Pro, etc. Duplicate of deviceType. MobileRequest MobileHandledException MobileRequestError MobileSession deviceManufacturer The manufacturer of the device, such as Motorola or HTC. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceModel The model number of the device, such as XT1039 or SM-G900F. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceType The specific type of device: iPhone 8, iPad Pro, etc. Duplicate of device. MobileRequest MobileHandledException MobileRequestError MobileSession deviceUuid ID A unique identifier assigned at the time of app installation by New Relic. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of uuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession exceptionAppBuildUuid ID The build UUID of the application binary in which the exception was caught. MobileHandledException exceptionCause The unsymbolicated, platform-specific cause of the exception. MobileHandledException exceptionLocation New Relic-defined location of an exception. Contains a combination of exception file name, class, line number, and method. MobileHandledException exceptionLocationClass The class that generated the exception. Only present if symbolication succeeded. MobileHandledException exceptionLocationFile The file that generated the exception. Only present if symbolication succeeded. MobileHandledException exceptionLocationLibraryOffset For XCFramework agent only. The library offset of the library that generated the exception. Only present if symbolication succeeded. MobileHandledException exceptionLocationLine The line number, taken from the exception, where the exception was generated. Only present if symbolication succeeded. MobileHandledException exceptionLocationMethod The method that generated the exception. Only present if symbolication succeeded. MobileHandledException exceptionMessage The unsymbolicated message from the exception. It can be user-generated or a generic system message. For Android, this is the Throwable message. MobileHandledException exceptionName The unsymbolicated exception type. MobileHandledException fingerprint ID The New Relic-generated identifier used to group like exceptions. MobileHandledException handledExceptionUuid ID The unique ID of the exception event. MobileHandledException lastInteraction The last interaction before a crash or harvest event, if one is present.
MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession libraryName For XCFramework agent only. The library name where the exception was generated. MobileHandledException libraryStartAddr For XCFramework agent only. The library start address where the exception was generated. MobileHandledException memUsageMb megabytes (MB) The total amount of memory, in MB, used by the application. Updated every 60 seconds. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession newRelicVersion The version number of the agent running on the application. For example: 4.232.0. Duplicate of newRelicAgentVersion. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession occurrenceTimestamp Agent-reported epoch timestamp of the handled exception. MobileHandledException osBuild For Android only. The specific build of the Android OS. MobileCrash MobileHandledException osMajorVersion The simplified version number of the app's host operating system, such as iOS 11, as compared to iOS 11.0.4. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osName The name of the app's host operating system, for example, iOS or Android. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osVersion The exact version number of the app's host operating system, such as iOS 11.0.4, as compared to iOS 11. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession platform The platform type of the mobile monitoring agent, such as native or Cordova. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession regionCode The specific region within a country where the monitored app is located. In the United States, regions are states. The regionCode is based on IP address and may not always match your region. MobileRequest MobileHandledException MobileRequestError MobileSession runTime For Android only. The Android Runtime version where the exception/crash was generated. MobileCrash MobileHandledException sessionId ID A unique identifier for a single user session. A new sessionId is created each time the app is brought into the foreground. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession timestamp Epoch timestamp of the handled exception. If the agent-reported timestamp is older than two days or has some other unexpected value, this instead represents the time New Relic created the event. MobileHandledException uuid ID A unique identifier assigned by New Relic for a specific app on a particular device. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of deviceUuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession
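By way of illustration (not from the original reference; the window is arbitrary), a NRQL query such as the following would count handled exceptions by their New Relic-defined location:
SELECT count(*) FROM MobileHandledException FACET exceptionLocation SINCE 1 day ago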
MobileRequest Data source: Mobile A MobileRequest event is created when an HTTP request successfully completes, resulting in a response code below 400. Attribute name Definition Data types appBuild Indicates the technical build number of the app binary. As a developer, you can use this attribute to identify specific builds of your app. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appId ID A unique identifier for a monitored app, based on the app token. For example: 35091. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appName The name of the monitored app. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersion The version number of the monitored app. For example: 2.2.9. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersionId ID An identifier for the specific version of the app. For example: 1713477. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnOwner The telecom owner of the ASN. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession bytesReceived bytes (B) Optional: If the application received a response from the requestUrl, the size of that response in bytes. MobileRequest MobileRequestError bytesSent bytes (B) Optional: If the application sent a request to the requestUrl, the size of that request in bytes. MobileRequest MobileRequestError carrier The network over which the app transferred data, such as Wi-Fi, Verizon, or Sprint. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession connectionType The type of connection the device was using, such as 2G or 3G. MobileRequest MobileRequestError countryCode The country from which the device ran the application. For a list of country codes, see ISO 3166-1 alpha-2. MobileRequest MobileHandledException MobileRequestError MobileSession device The specific type of the device: iPhone 8, iPad Pro, etc. Duplicate of deviceType. MobileRequest MobileHandledException MobileRequestError MobileSession deviceGroup The category of the device, such as iPhone or Tablet. MobileRequest MobileRequestError MobileSession deviceManufacturer The manufacturer of the device, such as Motorola or HTC. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceModel The model number of the device, such as XT1039 or SM-G900F. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceSize The display size of the device: small, normal, large, or xlarge. MobileRequest MobileRequestError deviceType The specific type of device: iPhone 8, iPad Pro, etc. Duplicate of device. MobileRequest MobileHandledException MobileRequestError MobileSession deviceUuid ID A unique identifier assigned at the time of app installation by New Relic. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of uuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession duration seconds (s) Optional: The time to complete the request, measured in fractional seconds. MobileRequest MobileRequestError guid ID The unique identifier for the segment. This is equivalent to spanID in OpenTracing semantics. MobileRequest MobileRequestError Span lastInteraction The last interaction before a crash or harvest event, if one is present. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession memUsageMb megabytes (MB) The total amount of memory, in MB, used by the application. Updated every 60 seconds. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession newRelicVersion The version number of the agent running on the application. For example: 4.232.0. Duplicate of newRelicAgentVersion. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osMajorVersion The simplified version number of the app's host operating system, such as iOS 11, as compared to iOS 11.0.4. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osName The name of the app's host operating system, for example, iOS or Android. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osVersion The exact version number of the app's host operating system, such as iOS 11.0.4, as compared to iOS 11. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession platform The platform type of the mobile monitoring agent, such as native or Cordova. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession regionCode The specific region within a country where the monitored app is located. In the United States, regions are states. The regionCode is based on IP address and may not always match your region. MobileRequest MobileHandledException MobileRequestError MobileSession requestDomain The domain that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestFingerprint ID The New Relic-generated identifier used to group like request events. MobileRequest requestMethod The REST method (GET, PUT, POST, etc.) that the application attempted when the event occurred. MobileRequest MobileRequestError requestPath The path that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestUrl The URL that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestUuid ID A unique identifier for the request event. MobileRequest MobileRequestError responseTime seconds (s) The time between the request and the response in fractional seconds. MobileRequest MobileRequestError sessionId ID A unique identifier for a single user session. A new sessionId is created each time the app is brought into the foreground. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession statusCode Optional: The HTTP status code for the HTTP event. MobileRequest MobileRequestError timestamp The UTC epoch time at which an event began. MobileRequest MobileRequestError MobileSession trace.id ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra-process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. TransactionError DistributedTraceSummary MobileRequest MobileRequestError Transaction Span traceId ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra-process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. MobileRequest AwsLambdaInvocation MobileRequestError AwsLambdaInvocationError Span uuid ID A unique identifier assigned by New Relic for a specific app on a particular device. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of deviceUuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession
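As an illustrative sketch (the attribute names are from this table; the window is arbitrary), a NRQL query like this would show average response time by request domain:
SELECT average(responseTime) FROM MobileRequest FACET requestDomain SINCE 1 day ago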
MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osMajorVersion The simplified version number of the app's host operating system, such as iOS 11, as compared to iOS 11.0.4. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osName The name of the app's host operating system, for example, iOS or Android. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osVersion The exact version number of the app's host operating system, such as iOS 11.0.4, as compared to iOS 11. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession platform The platform type of the mobile monitoring agent, such as native or Cordova. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession regionCode The specific region within a country where the monitored app is located. In the United States, regions are states. The regionCode is based on IP address and may not always match your region. MobileRequest MobileHandledException MobileRequestError MobileSession requestDomain The domain that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestFingerprint ID The New Relic-generated identifier used to group like request events. MobileRequest requestMethod The REST method (GET, PUT, POST, etc.) that the application attempted when the event occurred. MobileRequest MobileRequestError requestPath The path that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestUrl The URL that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestUuid ID A unique identifier for the request event. MobileRequest MobileRequestError responseTime seconds (s) The time between the request and the response in fractional seconds. MobileRequest MobileRequestError sessionId ID A unique identifier for a single user session. A new sessionId is created each time the app is brought into the foreground. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession statusCode Optional: The HTTP status code for the HTTP event. MobileRequest MobileRequestError timestamp The UTC epoch time at which an event began. MobileRequest MobileRequestError MobileSession trace.id ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra-process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. TransactionError DistributedTraceSummary MobileRequest MobileRequestError Transaction Span traceId ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra-process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. MobileRequest AwsLambdaInvocation MobileRequestError AwsLambdaInvocationError Span uuid ID A unique identifier assigned by New Relic for a specific app on a particular device. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of deviceUuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession MobileRequestError Data source: Mobile A MobileRequestError is used for HTTP errors or network failures. HTTP errors are HTTP requests that have a status code greater than 400. A network failure is an HTTP request that results in no response.
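As an illustration of how these two error cases separate in practice, here is a minimal sketch (not part of the original doc) of querying MobileRequestError events through NerdGraph's NRQL endpoint, faceted by the errorType attribute defined in the table below. The account ID and API key are hypothetical placeholders.

import requests

ACCOUNT_ID = 1234567   # placeholder: your New Relic account ID
API_KEY = "NRAK-..."   # placeholder: a user API key

# Count errors by type (HTTPError vs. NetworkFailure) over the last day.
NRQL = "SELECT count(*) FROM MobileRequestError FACET errorType SINCE 1 day ago"

GRAPHQL = """
query($accountId: Int!, $nrql: Nrql!) {
  actor { account(id: $accountId) { nrql(query: $nrql) { results } } }
}
"""

resp = requests.post(
    "https://api.newrelic.com/graphql",
    headers={"API-Key": API_KEY, "Content-Type": "application/json"},
    json={"query": GRAPHQL, "variables": {"accountId": ACCOUNT_ID, "nrql": NRQL}},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["data"]["actor"]["account"]["nrql"]["results"])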
The event is sent when the HTTP request completes. Attribute name Definition Data types appBuild Indicates the technical build number of the app binary. As a developer, you can use this attribute to identify specific builds of your app. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appId ID A unique identifier for a monitored app, based on the app token. For example: 35091. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appName The name of the monitored app. For example: My Mobile App - iOS. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersion The version number of the monitored app. For example: 2.2.9. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersionId ID An identifier for the specific version of the app. For example: 1713477. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnOwner The telecom owner of the ASN. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession bytesReceived bytes (B) Optional: If the application received a response from the requestUrl, the size of that response in bytes. MobileRequest MobileRequestError bytesSent bytes (B) Optional: If the application sent a request to the requestUrl, the size of that request in bytes. MobileRequest MobileRequestError carrier The network over which the app transferred data, such as Wi-Fi, Verizon, or Sprint. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession connectionType The type of connection which the device was using, such as 2G or 3G. MobileRequest MobileRequestError countryCode The country from which the device ran the application. For a list of country codes, see ISO 3166-1 alpha-2. MobileRequest MobileHandledException MobileRequestError MobileSession device The specific type of the device: iPhone 8, iPad Pro, etc. Duplicate of deviceType. MobileRequest MobileHandledException MobileRequestError MobileSession deviceGroup The category of the device, such as iPhone or Tablet. MobileRequest MobileRequestError MobileSession deviceManufacturer The manufacturer of the device, such as Motorola or HTC. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceModel The model number of the device, such as XT1039 or SM-G900F. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceSize The display size of the device: Small, normal, large, xlarge. MobileRequest MobileRequestError deviceType The specific type of device: iPhone 8, iPad Pro, etc. Duplicate of device. MobileRequest MobileHandledException MobileRequestError MobileSession deviceUuid ID A unique identifier assigned at the time of app installation by New Relic. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of uuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession duration seconds (s) Optional: The time to complete the request, measured in fractional seconds.
MobileRequest MobileRequestError errorType Either HTTPError or NetworkFailure, depending on whether the error is a result of a failed request to a host or a failure on the cellular network. MobileRequestError guid ID The unique identifier for the segment. This is equivalent to spanID in OpenTracing semantics. MobileRequest MobileRequestError Span lastInteraction The last interaction before a crash or harvest event, if one is present. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession memUsageMb megabytes (MB) The total amount of memory, in MB, used by the application. Updated every 60 seconds. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession networkError The error message associated with the iOS NSURL Error code. See networkErrorCode for more information. MobileRequestError networkErrorCode If the error is a network error, this is the iOS network error code. For Android applications, this is the mapped value. MobileRequestError newRelicVersion The version number of the agent running on the application. For example: 4.232.0. Duplicate of newRelicAgentVersion. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osMajorVersion The simplified version number of the app's host operating system, such as iOS 11, as compared to iOS 11.0.4. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osName The name of the app's host operating system, for example, iOS or Android. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osVersion The exact version number of the app's host operating system, such as iOS 11.0.4, as compared to iOS 11. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession platform The platform type of the mobile monitoring agent, such as native or Cordova. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession regionCode The specific region within a country where the monitored app is located. In the United States, regions are states. The regionCode is based on IP address and may not always match your region. MobileRequest MobileHandledException MobileRequestError MobileSession requestDomain The domain that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestErrorFingerprint ID The New Relic-generated identifier used to group like request error events. MobileRequestError requestMethod The REST method (GET, PUT, POST, etc.) that the application attempted when the event occurred. MobileRequest MobileRequestError requestPath The path that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestUrl The URL that the application attempted to access when the event occurred. MobileRequest MobileRequestError requestUuid ID A unique identifier for the request event. MobileRequest MobileRequestError responseBody Optional: The response that is sent from the requestDomain for the HTTP error, up to 4096 bytes. MobileRequestError responseTime seconds (s) The time between the request and the response in fractional seconds. MobileRequest MobileRequestError sessionId ID A unique identifier for a single user session. A new sessionId is created each time the app is brought into the foreground. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession statusCode Optional: The HTTP status code for the HTTP event.
MobileRequest MobileRequestError timestamp The UTC epoch time at which an event began. MobileRequest MobileRequestError MobileSession trace.id ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra-process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. TransactionError DistributedTraceSummary MobileRequest MobileRequestError Transaction Span traceId ID The unique ID (a randomly generated string) used to identify a single request as it crosses inter- and intra-process boundaries. This ID allows the linking of spans in a distributed trace. Included when distributed tracing is enabled. MobileRequest AwsLambdaInvocation MobileRequestError AwsLambdaInvocationError Span uuid ID A unique identifier assigned by New Relic for a specific app on a particular device. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of deviceUuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession MobileSession Data source: Mobile A MobileSession event is sent when an app is closed, backgrounded, or when 10 minutes of active use has elapsed. This is the source of the general session data used by the other mobile monitoring events. MobileSession captures attributes such as device type, device OS, and geographical information. Attribute name Definition Data types appBuild Indicates the technical build number of the app binary. As a developer, you can use this attribute to identify specific builds of your app. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appId ID A unique identifier for a monitored app, based on the app token. For example: 35091. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appName The name of the monitored app. For example: My Mobile App - iOS. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersion The version number of the monitored app. For example: 2.2.9. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession appVersionId ID An identifier for the specific version of the app. For example: 1713477. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession asn Autonomous System Number: a unique number identifying a group of IP networks that serves the content to the end user. PageViewTiming AjaxRequest BrowserTiming MobileRequest MobileHandledException PageAction MobileRequestError JavaScriptError PageView MobileSession BrowserInteraction Span asnOwner The telecom owner of the ASN. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession bundleId ID The unique string used to identify the application. MobileCrash MobileSession carrier The network over which the app transferred data, such as Wi-Fi, Verizon, or Sprint. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession category The type of data, either session or interaction. MobileSession Mobile city The city in which the event occurred, such as Portland or Seattle. PageViewTiming AjaxRequest BrowserTiming MobileHandledException PageAction JavaScriptError PageView MobileSession BrowserInteraction countryCode The country from which the device ran the application. For a list of country codes, see ISO 3166-1 alpha-2.
MobileRequest MobileHandledException MobileRequestError MobileSession device The specific type of the device: iPhone 8, iPad Pro, etc. Duplicate of deviceType. MobileRequest MobileHandledException MobileRequestError MobileSession deviceGroup The category of the device, such as iPhone or Tablet. MobileRequest MobileRequestError MobileSession deviceManufacturer The manufacturer of the device, such as Motorola or HTC. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceModel The model number of the device, such as XT1039 or SM-G900F. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession deviceType The specific type of device: iPhone 8, iPad Pro, etc. Duplicate of device. MobileRequest MobileHandledException MobileRequestError MobileSession deviceUuid ID A unique identifier assigned at the time of app installation by New Relic. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of uuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession install Indicates true only if the current session is the first session after app install. MobileSession lastInteraction The last interaction before a crash or harvest event, if one is present. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession memUsageMb megabytes (MB) The total amount of memory, in MB, used by the application. Updated every 60 seconds. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession newRelicAgent The New Relic agent running on the application. For example: the iOSAgent or the androidAgent. MobileSession newRelicVersion The version number of the agent running on the application. For example: 4.232.0. Duplicate of newRelicAgentVersion. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osMajorVersion The simplified version number of the app's host operating system, such as iOS 11, as compared to iOS 11.0.4. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osName The name of the app's host operating system, for example, iOS or Android. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession osVersion The exact version number of the app's host operating system, such as iOS 11.0.4, as compared to iOS 11. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession platform The platform type of the mobile monitoring agent, such as native or Cordova. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession regionCode The specific region within a country where the monitored app is located. In the United States, regions are states. The regionCode is based on IP address and may not always match your region. MobileRequest MobileHandledException MobileRequestError MobileSession sessionCrashed A boolean value indicating whether or not that session crashed. MobileCrash MobileSession sessionDuration seconds (s) The length of time for which the user used the application in seconds. If the session crashes, sessionDuration is not captured (although other events and attributes are still recorded). For sessions longer than 10 minutes, events in the Interaction and Custom event categories are sent to Insights while the session is ongoing, and therefore do not have sessionDuration attributes. Events recorded near the end of the session will include the duration, as will the Session event category.
MobileSession sessionId ID A unique identifier for a single user session. A new sessionId is created each time the app is brought into the foreground. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession timeSinceLoad seconds (s) The time, in seconds, from the beginning of the mobile session to the time the event occurred. MobileSession timestamp The UTC epoch time at which an event began. MobileRequest MobileRequestError MobileSession upgradeFrom Indicates the previous version number only if this is the first launch after app upgrade. MobileSession uuid ID A unique identifier assigned by New Relic for a specific app on a particular device. It is only reset if a user deletes and then reinstalls the app. For example: B8B0BC30-0235-11E4-9191-0800200C9A66. Duplicate of deviceUuid. MobileRequest MobileCrash MobileHandledException MobileRequestError MobileSession NetworkSample Data source: Infrastructure A NetworkSample event captures the descriptive and state information for each network device associated with a server. It includes the device's interface and address information, as well as current usage data. New Relic samples this data every 10 seconds for each attached network interface and packages it into a NetworkSample event, then sends the raw data to New Relic's collectors every 60 seconds. Attribute name Definition Data types agentName The name of the agent (Infrastructure). ProcessSample StorageSample NetworkSample SystemSample agentVersion The version of the New Relic Infrastructure agent. ProcessSample StorageSample NetworkSample SystemSample criticalViolationCount count The number of times that alert conditions violated critical thresholds, causing critical violations and opening incidents. If this attribute does not exist on the sample, it has zero violations. ProcessSample StorageSample NetworkSample ContainerSample SystemSample entityID ID New Relic's unique ID number for the entity that is reporting data. This is used by New Relic to distinguish between customers and their entities. ProcessSample StorageSample NetworkSample SystemSample fullHostname The fully qualified (DNS) hostname. ProcessSample StorageSample NetworkSample SystemSample hardwareAddress The unique hardware address of the interface. NetworkSample hostname The short version of the entity's name. ProcessSample StorageSample NetworkSample SystemSample interfaceName The interface name ", + "body": "One of the ways to report custom data to New Relic is with custom events and attributes. Have questions about why you'd use custom data? See Introduction to custom data. Requirements For event and attribute formatting requirements and best practices, see our documentation about data limits and requirements. Avoid rate limits Reporting a large number of custom events and/or attributes can cause degraded query performance. It may also result in approaching or passing data collection rate limits. For optimal performance, first think about what data you want to analyze, and then create only the events and/or attributes necessary to meet these specific goals. Be aware of the following data and subscription requirements for inserting and accessing custom data: Ensure you follow limits and requirements around event/attribute data types, naming syntax, and size. The amount of data you have access to over time depends on your data retention policy. Example use cases Two popular custom data solutions are custom events and custom attributes.
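For a quick taste of what a custom event looks like in practice, here is a minimal sketch (not part of the original doc) of reporting one through the Event API route covered later in this doc. The account ID, insert key, and MyOrgBackup event type are hypothetical placeholders; the payload is a JSON array of event objects, each carrying an eventType plus custom attributes.

import requests

ACCOUNT_ID = 1234567      # placeholder: your New Relic account ID
INSERT_KEY = "NRII-..."   # placeholder: an insert (license) key

event = {
    "eventType": "MyOrgBackup",    # hypothetical custom event type
    "cluster": "solr-prod-01",     # custom attributes you can query later
    "durationSeconds": 412,
}

resp = requests.post(
    f"https://insights-collector.newrelic.com/v1/accounts/{ACCOUNT_ID}/events",
    headers={"X-Insert-Key": INSERT_KEY, "Content-Type": "application/json"},
    json=[event],   # the Event API accepts an array of events
    timeout=30,
)
resp.raise_for_status()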
There are several ways to accomplish this (more on that later in this doc), depending on your New Relic implementation and tools. Here are some common use cases for implementing custom events and attributes. Using custom attributes Custom attributes are often used to add important business and operational context to existing events. Business context might include: Customer token Customer market segment Customer value classification Workflow control values not obvious in the URIStem User/product/account privilege context Operational context might include: Which feature flags were used What datastore was accessed What cache was accessed What errors were detected and ignored (fault partitioning) Using custom events Event data is one of New Relic's four core data types. We recommend reading that definition to understand what we mean by \"event\" and why that data type is most used for reporting specific types of activity. The use cases for custom events vary widely. Basically they are used for any type of activity that an organization deems important and that is not already being monitored. For example: An event can represent an activity involving multiple actions, like a customer purchasing a certain combination of products. An event can record backup activity. For example, you can set up reporting of events that represent production backups of SOLR instances into an event table, with a timestamp of when it occurred, which cluster, and the duration. Send custom events and attributes Methods for sending custom events and attributes include: Source How to send custom data APM agent Use APM agent APIs to report custom events and custom attributes. Browser monitoring agent Add custom attributes to the PageView event via the browser API call setCustomAttribute. Send PageAction event and attributes via the browser API. Forward APM agent custom attributes to the PageView event. Event API To report custom events not associated with other New Relic features, use the Event API. Infrastructure monitoring agent Add custom attributes to default infrastructure events. Use the Flex integration tool to report your own custom event data. Mobile monitoring agent Use the mobile agent API to send custom events and attributes. Synthetic monitoring Add custom attributes to the SyntheticCheck event via the $util.insights tools. For ways to report other types of custom data, see: Metric API Log API Trace API Extend data retention To learn how to extend how long events are retained in your account, see our documentation about data retention.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 100.073204, + "_score": 117.528824, "_version": null, "_explanation": null, "sort": null, "highlight": { - "sections": "BrowserInteraction", - "body": " request. PageViewTiming AjaxRequest BrowserTiming PageAction JavaScriptError PageView BrowserInteraction ContainerSample Data source : Infrastructure This event is reported by the New Relic Infrastructure agent. It collects data from all the Docker containers on the host (which may or may not be running" + "body": " with other New Relic features, use the Event API. Infrastructure monitoring agent Add custom attributes to default infrastructure events. Use the Flex integration tool to report your own custom event data. Mobile monitoring agent Use the mobile agent API to send custom events and attributes. 
Synthetic" }, - "id": "603f53b164441f41894e8875" + "id": "609fa5fb64441f9ebfd2a1db" + }, + { + "sections": [ + "On-host integration executable file: JSON specifications", + "Executable file requirements", + "File placement", + "Integration protocol v4: Example JSON output", + "Integration protocol v3: Example JSON output", + "JSON: General specifications", + "General output and JSON formatting", + "Errors and logging", + "Exit/close of executable", + "JSON: Header", + "JSON: Entities", + "Loopback address replacement on entity names", + "JSON: Metric, inventory, and event data", + "Important", + "Metric data", + "Event data", + "Inventory data" + ], + "title": "On-host integration executable file: JSON specifications", + "type": "docs", + "tags": [ + "Specifications", + "Infrastructure Integrations SDK", + "Create integrations" + ], + "external_id": "f8a03eb3e346e4b403c4f5bea5228ce4ea69dfe9", + "image": "https://docs.newrelic.com/static/de6d60d8375ae15068eea2d7d28b9e3f/ade6e/new-relic-integrations-sdk-data-structure.png", + "url": "https://docs.newrelic.com/docs/infrastructure/host-integrations/infrastructure-integrations-sdk/specifications/host-integration-executable-file-json-specifications/", + "published_at": "2022-02-14T14:57:37Z", + "updated_at": "2022-01-17T18:20:50Z", + "document_type": "page", + "popularity": 1, + "body": "When using our Integrations SDK for infrastructure monitoring to build a custom on-host integration, the integration will consist of at least three files: an executable file and at least one configuration file. The executable file generates JSON data that is consumed by the infrastructure monitoring agent and sent to New Relic. We refer to the JSON object as the SDK integration protocol. Executable file requirements The executable can be any file that runs from a command-line interface; for example: A shell script A scripting language script A compiled binary The only requirement of your executable file is that it exports JSON data, in a single line format, that meets the specifications in this document. Recommendation: Use Go to create integrations; it's the language we use to create on-host integrations and the integration building tools. However, you can create an integration in any language. File placement The executable file goes in this directory: Linux: /var/db/newrelic-infra/custom-integrations Copy Windows: C:\\Program Files\\New Relic\\newrelic-infra\\newrelic-integrations Copy Integration protocol v4: Example JSON output The following section explains the new JSON schema (integration protocol v4). The SDK v4 only supports this new protocol version. These are the most important changes: A new integration object at the top level. The entity and metrics objects have been modified. See the v3 to v4 migration guide for more information. { \"protocol_version\":\"4\", # protocol version number \"integration\":{ # this data will be added to all metrics and events as attributes, # and also sent as inventory \"name\":\"integration name\", \"version\":\"integration version\" }, \"data\":[ # List of objects containing entities, metrics, events and inventory { \"entity\":{ # this object is optional. If it's not provided, then the Entity will get # the same entity ID as the agent that executes the integration. \"name\":\"redis:192.168.100.200:1234\", # unique entity name per customer account \"type\":\"RedisInstance\", # entity's category \"displayName\":\"my redis instance\", # human readable name \"metadata\":{} # can hold general metadata or tags. 
Both are key-value pairs that will # be also added as attributes to all metrics and events }, \"metrics\":[ # list of metrics using the dimensional metric format { \"name\":\"redis.metric1\", \"type\":\"count\", # gauge, count, summary, cumulative-count, rate or cumulative-rate \"value\":93, \"attributes\":{} # set of key-value pairs that define the dimensions of the metric } ], \"inventory\":{...}, # Inventory remains the same \"events\":[...] # Events remain the same } ] } Copy Integration protocol v3: Example JSON output The JSON includes: A header, with basic integration data (name, version) A data list, which includes one or more entities reporting data (metric, inventory, and/or event data) This diagram shows this structure: Here is an example JSON output (formatted with line breaks for readability). Definitions and specifications follow this example: { \"name\": \"my.company.integration\", \"protocol_version\": \"3\", \"integration_version\": \"x.y.z\", \"data\": [ { \"entity\": { \"name\": \"my_garage\", \"type\": \"building\", \"id_attributes\": [ { \"key\": \"environment\", \"value\": \"production\" }, { \"key\": \"node\", \"value\": \"master\" } ] }, \"metrics\": [ { \"temperature\": 25.3, \"humidity\": 0.45, \"displayName\": \"my_garage\", \"entityName\": \"building:my_garage\", \"event_type\": \"BuildingStatus\" } ], \"inventory\": { \"out_door\": { \"status\": \"open\" } }, \"events\": [] }, { \"entity\": { \"name\": \"my_family_car\", \"type\": \"car\", \"id_attributes\": [ { \"key\": \"environment\", \"value\": \"production\" }, { \"key\": \"node\", \"value\": \"master\" } ] }, \"metrics\": [ { \"speed\": 95, \"fuel\": 768, \"displayName\": \"my_family_car\", \"entityName\": \"car:my_family_car\", \"event_type\": \"VehicleStatus\" } ], \"inventory\": { \"motor\": { \"brand\": \"renault\", \"cc\": 1800 } }, \"events\": [ { \"category\": \"gear\", \"summary\": \"gear has been changed\" } ], \"add_hostname\": true } ] } Copy JSON: General specifications Here are general specifications for the JSON output: General output and JSON formatting Data is emitted to stdout (standard output) in JSON format. The agent will treat stdout and stderr file descriptors as line-wise buffers. Use standard JSON, not \"pretty printed\" JSON, for the output. Recommendation: Include an optional command line switch (for example, --pretty) to make JSON \"pretty printed\" for debugging purposes. Errors and logging Error and debug information must be emitted to stderr (standard error). Follow New Relic's recommendations and best practices for integration logging. Exit/close of executable On success, the executable must exit with a 0 status code and follow platform-specific conventions. For example: Linux: 0 == EX_OK Windows: 0 == ERROR_SUCCESS If the executable exits with a non-zero status, the agent will discard any data from stdout and write a message to its log file with the name of the integration, the exit code, and any diagnostic information it can gather. JSON: Header Here's an example of the first part of an on-host integration's JSON output: \"name\":\"com.myorg.nginx\", \"protocol_version\":\"3\", \"integration_version\":\"1.0.0\", \"data\": [ {entities}...] Copy A minimal payload would be a JSON object with only the header fields. Recommendation: If there is no data to collect, use the program return code and log messages written to stderr. JSON header fields Description name Required. Must be identical to the name field in the configuration file.
Recommendation: Use reverse domain names to generate unique integration names. protocol_version Required. The version number of the exchange protocol between the integration and the agent that the integration executable is using. The current version is 3. This protocol requires Infrastructure agent 1.2.25 or higher. Protocol 2 requires Infrastructure agent 1.0.859 or higher. Protocol 1 is compatible with all agents. For more information, see SDK changes. integration_version Optional. The integration version. Used to track the integration version running on each host. An integration can have more than one executable. Therefore this is not simply the executable's version. data Required for reporting data. A list containing the data reported from one or more entities. JSON: Entities Inside the data list of the JSON output are one or more entities. The entity entry fields include: Entity JSON fields Description entity Required. Entity data or properties. metrics Optional. Entity related metric list. inventory Optional. Entity related inventory items. events Optional. Entity related event list. add_hostname Optional. Boolean. If true, the entity metrics will be decorated with the hostname. Inside the data list of the JSON output are one or more entities and their data. The entity entry has these fields: Entity data JSON fields Description name Required. The identifier/name of the entity. Recommendation: Use reverse domain names to generate unique integration names. type Required. The kind of entity. It will be used by the Infrastructure agent as a namespace to compose a unique identifier in conjunction with the name. id_attributes Optional. A list of key-value attributes that provide uniqueness to an entity. They are attached to the name in the form of key=value to ease readability, provide extra information, and improve entity name uniqueness. Identifier attributes are useful when the entity name is not enough to work as a unique identifier, or when it doesn't provide enough meaningful information. For example: [ { \"key\": \"service\", \"value\": \"mysql\" }, { \"key\": \"role\", \"value\": \"master\" }, ... ] Copy Loopback address replacement on entity names As of Infrastructure agent version 1.2.25 or higher, protocol v3 improves remote entity uniqueness by adding local address replacement on entity names at agent level. When several remote entities have their name based on an endpoint (either IP or hostname), and this name contains loopback addresses, there are two problems: This localhost value does not provide valuable info without more context. The name could collide with other services named with a local address. This happens when: Endpoint names are like localhost:port. Ports tend to be the same for a given service; for example, 3306 for Mysql. On incoming protocol v3 data, the Infrastructure agent replaces loopback addresses on the entity name (and key) with the first available item of the following list: Cloud provider instance ID, retrieved by the agent if applicable Display name, set via the display_name agent config option Hostname, as retrieved by the agent For example, if an integration using protocol v3 returns an entity with the name localhost:3306, and the agent is running on bare metal (doesn’t have cloud provider instance ID), the display_name has not been set, and the hostname is prod-mysql-01, then the agent will replace the localhost and produce the entity name prod-mysql-01:3306.
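The precedence just described can be summarized with a short illustrative sketch; this is not the agent's actual implementation, and the function and names are hypothetical.

LOOPBACK_HOSTS = {"localhost", "127.0.0.1", "::1"}

def replace_loopback(entity_name, instance_id=None, display_name=None, hostname=None):
    """Replace a loopback host in a 'host:port' entity name with the first
    available of: cloud instance ID, display_name, agent hostname."""
    host, sep, port = entity_name.partition(":")
    if host not in LOOPBACK_HOSTS:
        return entity_name  # nothing to replace
    replacement = instance_id or display_name or hostname
    return replacement + sep + port if replacement else entity_name

# Bare metal, no display_name set, hostname prod-mysql-01:
# replace_loopback("localhost:3306", hostname="prod-mysql-01") -> "prod-mysql-01:3306"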
The Infrastructure agent enables loopback address replacement automatically for v3 integration protocol. You can also enable this for v2 via the agent configuration flag replace_v2_loopback_entity_names. In this case all the integrations being run by the agent using v2 will have their names replaced whenever they carry a local address. JSON: Metric, inventory, and event data Data values follow the executable file header. You can record three data types: Metrics Events Inventory Important From the perspective of New Relic dashboards, infrastructure metrics and events are both classified as event data. Metric data Infrastructure metric data typically is used for simple numeric data; for example: Count of MySQL requests in a queue per second Count of active connections to a specific system per minute Besides associated metadata, a metric is essentially just a metric name and a numeric value. To learn more about this data, see Event data. Here's an example of an entity's metric data JSON: [ { \"event_type\":\"MyorgNginxSample\", \"net.connectionsActive\": 54, # metric data (a key/value pair) \"net.requestsPerSecond\": 21, # metric data (a key/value pair) \"net.reading\": 23, # metric data (a key/value pair) } ] Copy JSON metric data field Description event_type Required. event_type defines where the metrics will be stored. Each set of metrics is stored as a sample inside the specified event type. Each integration must store its data in its own event type. If you are generating multiple types of samples from the same integration, use different event types for each. Recommendation: To ensure the event types used by your integration are unique, prefix the event type with your company name or acronym. For example, if your custom integration captures Cassandra node metrics and Cassandra column family metrics as different samples, store them in different event types, such as MyOrgCassandraSample and MyOrgCassandraColumnFamilySample. If the event type does not exist, it will be created automatically when New Relic receives data from your integration and make it available in the UI. One or more metric data key/value pairs Required (at least one). A metric measurement containing a name (key) and its value. Make sure these generally conform to the entity type's specification for maximum compatibility with Infrastructure features. Recommendation: Prefix your metric with a category to help when navigating through metrics in the New Relic UI. New Relic integrations currently use: net: Number of connections, web server requests, bytes transmitted over the network, etc.; for example, net.connectionsActive. query: Metrics directly related to database queries; for example, query.comInsertPerSecond. db: Internal database metrics; for example, db.openTables. Use multilevel prefixes for additional grouping when it makes sense; for example, db.innodb.bufferPoolPagesFree. Use the innerCamelCase naming format; for example: net.requestsPerSecond. Use a metric name as close to the original one as possible while respecting the other specifications. For example: Original name: Qcache_hits Metric name: db.qCacheHits Measurement unit Recommendation: Specify the measurement unit using a unit suffix if it is not already included in the original metric name, as in the following examples: Percentages: Use Percent; for example: cpuUtilPercent. Rates: Use a format such as PerSecond. Seconds is the standard rate measurement, but you can also use other units, such as PerMinute or PerDay. Byte measurements: Use Bytes. 
Recommendation: If a metric is captured in a different unit, such as Megabytes, convert it to Bytes. For example: db.allMemtablesOffHeapSizeBytes. Time measurements: Use Milliseconds. Recommendation: If a metric is captured in a different unit, such as Seconds, convert it to Milliseconds. For example: query.readLatency50thPercentileMilliseconds Value Use a string or a number (integer or float). Strings can be used as associated metadata, allowing data to be filtered in the New Relic UI. A boolean would need to be expressed as either a string (\"true\", \"false\") or an integer (1, 0). Do not use complex types of values, such as arrays or hashes. Event data Infrastructure event data represents arbitrary, one-off messages for key activities on a system; for example: Starting up a specific service Creating a new table You can view this data in the Infrastructure Events page and Infrastructure events heatmap. You can also query the InfrastructureEvent event type in New Relic. Here's an example of an integration's event data JSON payload, which follows the header JSON, and field definitions. [ { \"summary\":\"More than 10 request errors logged in the last 5 minutes\", \"category\": \"notifications\" } ] Copy JSON event field Description summary Required. The message to be sent. Use a simple string. category Optional. String value of one of the existing categories used in the Infrastructure product, or a new category. The default value is notifications. Examples of categories: applications automation configuration metadata notifications os packages services sessions system users Inventory data Infrastructure inventory data captures live state system information; for example: Configuration data System versions installed Other system metadata You can view this data on the Inventory page and Infrastructure events heatmap. You can also query data related to inventory changes. The inventory data type is a hash of one or more JSON sub-objects containing: A unique inventory id key (required): The inventory item's identifier. This is used in combination with the integration's prefix to create a path to the inventory item's data. Like paths combine across entities and show possible variance. This ID points to a hash. A hash of key/value pairs, one per inventory attribute. At least one is required. Keys should be strings. Values may either be a scalar type (string or number) or another hash object of key/values. New Relic supports hierarchy, but the final value nodes must be a scalar. Here's an example of an integration's inventory data JSON: { \"events/worker_connections\": { \"value\": 1024 }, \"http/gzip\" : { \"value\": \"on\" } } Copy", + "info": "", + "_index": "520d1d5d14cc8a32e600034b", + "_type": "520d1d5d14cc8a32e600034c", + "_score": 95.78803, + "_version": null, + "_explanation": null, + "sort": null, + "highlight": { + "title": "On-host integration executable file: JSON specifications", + "sections": "On-host integration executable file: JSON specifications", + "tags": "Infrastructure Integrations SDK", + "body": " is using. The current version is 3. This protocol requires Infrastructure agent 1.2.25 or higher. Protocol 2 requires Infrastructure agent 1.0.859 or higher. Protocol 1 is compatible with all agents. For more information, see SDK changes. integration_version Optional. The integration version. 
Used" + }, + "id": "617dc09e64441f0a22fbde01" } ], "/battlesnake/63a65857-a25e-4654-9174-928e5894b3b9": [ { "sections": [ - "User type: basic, core, and full platform users", - "What's the \"user type\"?", - "Overview of user type capabilities", - "Important", - "Detailed capability comparison table", - "Applied intelligence capabilities", - "Log management capabilities", - "How to pick a user type", - "Manage user type and upgrade requests", - "Change to \"full user\" language", - "Lacking access to something?" + "Introduction to automated user management (SCIM provisioning)", + "Benefits", + "Requirements and recommendations", + "Set up automated user management" ], - "title": "User type: basic, core, and full platform users", + "title": "Introduction to automated user management (SCIM provisioning)", "type": "docs", "tags": [ - "New Relic One user management", + "Automated user management", "Accounts and billing", "Accounts" ], - "external_id": "9c5f86e8314ea896333ac36452994c7ee712e7c5", + "external_id": "831a5f1137eccac9540d716302645b4e976a6332", "image": "", - "url": "https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-user-management/user-type/", - "published_at": "2022-02-14T06:37:32Z", - "updated_at": "2022-02-14T06:37:32Z", + "url": "https://docs.newrelic.com/docs/accounts/accounts/automated-user-management/automated-user-provisioning-single-sign/", + "published_at": "2022-02-14T07:42:35Z", + "updated_at": "2022-02-14T07:42:35Z", "document_type": "page", "popularity": 1, - "body": "In this doc you'll learn how we define user type, what capabilities each user type has, and how to decide on a user type. Want to learn about how users are calculated for billing purposes? See New Relic One pricing. What's the \"user type\"? A user’s user type is what determines the set of New Relic capabilities a user can theoretically access. In practice, users will often have roles assigned to them that limit their capabilities in various ways, but the user type represents their maximum theoretical set of capabilities. There are three user types: Basic user: several basic but powerful New Relic platform capabilities. Core user (limited availability): has more capabilities than a basic user. Full platform user: all capabilities. To skip to learning more about capabilities, see Capabilities. To learn more about the user type concept, keep reading. On the New Relic One pricing model, basic users are free, and core users and full platform users are billable. Your user count is tracked in the usage UI. Because user count is a billing factor, it's important to understand how user billing calculation works. If you're tasked with adding New Relic users, one of the key decisions to make is what user type to make them. If you're not sure at first, you can add them as basic users and later decide which users you want to upgrade. For how to adjust user type, see Manage user type. The user type is meant to be a fairly long-term setting based on a user's expected New Relic responsibilities over the next several months or longer. That intention is reflected in our billing calculations and downgrade rules. For more frequent or more granular adjustments to a user's New Relic access, you can assign them to more restrictive roles. Overview of user type capabilities Important Basic users have free preview access to core user capabilities starting January 12, 2022 and ending February 28, 2022. 
Here's an overview of the capabilities of each user type: Basic user Core user (limited availability) Full platform user Basic users are free and you can have an unlimited number of them. They can set up New Relic observability tools, run queries of your data, create charts and dashboards, use some basic alerting features, and more. For more detail, see the complete capabilities table. Core users can access more features than basic users but less than full platform users. They have access to some powerful developer-centric features like New Relic CodeStream, our logs UI, and the ability to build New Relic One apps. For more detail, see the complete capabilities table. Full platform users can access everything, including our more curated observability UI experiences, such as APM, infrastructure monitoring, browser monitoring, mobile monitoring, synthetic monitors, and more. For more details, see the complete capabilities table. Remember that the user type governs a user's theoretical access. In practice, users can have roles assigned that restrict their access further. Detailed capability comparison table Here's a detailed comparison of capabilities by user type. For tips on why you'd make a user one user type or another, see Decide on user type. What you get Basic user Core user (see availability) Full platform user Unlimited queries Custom charts and dashboards Report data from any source (agents, integrations, APIs) Alerts and applied intelligence1 Get notifications & resolve incidents Get notifications & resolve incidents Log management1 Search/view only New Relic Instant Observability (I/O) Install only Install only New Relic Explorer with Lookout and Navigator List view only NerdGraph (our GraphQL API) (for permitted features) New Relic Codestream integration Errors tracking with errors inbox New Relic One custom apps Application performance monitoring (APM) Kubernetes monitoring and Pixie Infrastructure monitoring Distributed tracing Infinite Tracing (Pro & Enterprise editions only) Network performance monitoring (NPM) Browser and mobile monitoring Synthetic monitoring Serverless monitoring Entity maps and service maps Workloads Manage data and manage other users 1Here are more details about how user type access works for some specific features: Applied intelligence capabilities Here are more details about how user type impacts access to applied intelligence features: What you get Basic user Core user (see availability) Full platform user Acknowledge and resolve issues Root cause analysis Incident/anomaly analysis Correlation assistant Issue maps Machine learning classification Log management capabilities Important On January 12, 2022, basic users had some log management capabilities removed. Their current capabilities are described below. Log-related capabilities: Basic users can view and search the log management UI. Core users and full platform users have access to all log management UI features and configuration capabilities, and can see logs in context for the UI experiences they have access to (for example, core users can see applicable log data in the errors inbox UI). How to pick a user type For New Relic One pricing, user type is a factor in billing. Before deciding on user types, you'll want to ensure you understand user-related calculations and downgrade rules. For an in-depth capability comparison, see the Capability table. 
Here are some tips for deciding on a user type: Reasons to make someone a full platform user: They play a key role in the development, testing, deployment, and maintenance phases of the application development lifecycle. They break/fix code regularly; they are responsible for triaging workflows, troubleshooting, or managing users and roles for their team. They have DevOps practices (for example, version control systems, and implementation of CI/CD). They need to use New Relic's curated dashboards and experiences (not just the ability to create their own custom queries and charts); in other words, they need full access to our platform. They need to be able to manage users and/or billing. Reasons to make someone a core user: They're developers who aren't tasked with reliability and uptime as their primary responsibility. They don’t require full platform access, but would benefit from some of the specific functionalities offered to core users, like: Debugging code issues directly from their IDE, using New Relic CodeStream. Viewing errors in one place from across your stack, using Errors Inbox. Using our logs management UI to spot problems and patterns in logs. The ability to build and deploy New Relic One apps, and use apps from the app catalog. Reasons to make someone a basic user: They play a key role in the planning phase of the application development lifecycle. They use and configure New Relic tools to get data into New Relic, and access, configure, and use alerts on such data (not necessarily responsible for triaging workflows, troubleshooting, or managing users and roles for their team). They want to see high-level analytics and business metrics for future planning (such as C-Suite executives). They don't need to use our curated experiences and dashboards, but would benefit from the ability to create their own custom queries and charts of data; in other words, they don't need full access to the platform. They don't manage users or billing. Manage user type and upgrade requests How you manage a user's user type depends on which user model your organization's users are on: New Relic One user model docs Original user model docs For rules around billing and downgrading users, see Billing and downgrade rules. Change to \"full user\" language In November of 2021, in the product and public docs, the \"full user\" user type name was changed to \"full platform user.\" For organizations that have the version of New Relic One pricing without core users: \"Full platform users\" are equivalent to what are referred to as \"full users\" and \"monthly provisioned users\" in your New Relic agreement. For more about agreement-level language, see Billable user agreement terminology. When querying your full platform user counts, you use FullUsersBillable and not FullPlatformUsersBillable. Lacking access to something? For questions related to lack of access to New Relic accounts or features, see Factors affecting access.", + "body": "New Relic lets you set up automated user management (AUM), which allows you to import, update, and deactivate your New Relic users from an identity provider, like Azure AD, Okta, or OneLogin. Benefits Before reading the benefits of automated user management, we recommend reading Get started with SAML SSO and SCIM. Benefits of enabling automated user management include: Time and cost efficiency: When you make changes in your identity provider, such as creating, updating, and removing users, these changes are automatically reflected in New Relic. 
By being able to manage a large set of users from your identity provider, it reduces the workload of your admins who'd otherwise need to do a significant amount of work in New Relic to accomplish the same thing. Increased productivity: By having a more automatic way to set up users and groups, your users are more quickly enabled and ready to use New Relic. Enhanced security: SCIM is an industry standard protocol for maintaining groups of users. Use of this feature requires SAML SSO, so once your users are added to New Relic, they can log in using your identity provider. Popular identity providers Azure AD, Okta, and OneLogin have dedicated New Relic apps, improving ease of enablement. Requirements and recommendations Requirements and recommendations: Requires Pro or Enterprise edition. Supports SAML 2.0 standard for single sign on (SSO). Supports SCIM 2.0 standard. User model-related requirements: This feature requires you to be on our New Relic One user model and creates users on that model. If you're on our original user model, talk to your New Relic account representative. Configuration requires the Authentication domain manager role (users in the default group Admin have this). There are three identity providers that have a New Relic app: Azure AD, Okta, and OneLogin. For other identity providers, you can use our SCIM API. Before enabling this, it helps to first set up user groups in your identity provider service and think about which New Relic roles and accounts those groups will have access to. Set up automated user management For an explanation of how your identity provider groups map over to New Relic groups, see How your groups map over. To use automated user management to import users from your identity provider: It's important to first review the requirements. In the authentication domain UI, create a new authentication domain. If you use Azure AD, Okta, or OneLogin, use the applicable guide: Azure AD | Okta | OneLogin. If you don't use one of the above services, you'll need to: Use the authentication domain UI to enable SCIM as the source of users. Use our SCIM API to integrate with your identity provider service. See the SCIM API tutorial for all the steps involved. Recommended: Set a time zone in your identity provider. How this is done depends on the service you use. If you don't set a time zone, our UI uses UTC time zone (specified in IANA format, also known as the \"Olson\" format: for example, \"America/Los_Angeles\"). Your users also have an option to override your settings and set their own time zone. If you have issues, contact your account representative. After being provisioned, your users can click on the New Relic SCIM/SSO application tile in their identity provider to be logged into New Relic. 
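For teams whose identity provider has no dedicated New Relic app, the SCIM API route mentioned in the setup steps above boils down to standard SCIM 2.0 calls. Here is a minimal sketch, assuming the endpoint and bearer-token auth described in the SCIM API tutorial (confirm both against that tutorial); the token is the one saved when SCIM was enabled for the authentication domain, and the user details are placeholders.

import requests

SCIM_TOKEN = "..."  # placeholder: the API token from the authentication domain UI

new_user = {
    "schemas": ["urn:ietf:params:scim:schemas:core:2.0:User"],
    "userName": "jdoe@example.com",
    "name": {"givenName": "Jane", "familyName": "Doe"},
    "emails": [{"primary": True, "value": "jdoe@example.com"}],
    "timezone": "America/Los_Angeles",  # optional, in IANA format as noted above
}

resp = requests.post(
    "https://scim-provisioning.service.newrelic.com/scim/v2/Users",
    headers={"Authorization": f"Bearer {SCIM_TOKEN}",
             "Content-Type": "application/scim+json"},
    json=new_user,
    timeout=30,
)
resp.raise_for_status()
print(resp.json().get("id"))  # the ID of the newly provisioned user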
To learn more about New Relic's roles and capabilities, see Standard roles.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 2.2140393, + "_score": 2.0981786, "_version": null, "_explanation": null, "sort": null, - "id": "61e2dd0a64441fa32500e1f5", + "id": "6043d60e64441ff8f5378f37", "highlight": {} }, { "sections": [ - "Billable user calculation and user downgrade rules", - "View user count in the UI", - "Pricing versions (with and without core users)", - "User calculation for the primary pricing version (with core users)", - "Important", - "User downgrade rules for primary pricing version (with core users)", - "Pay-as-you-go: downgrade rules", - "Annual pool of funds (longer term contract): downgrade rules", - "Rules for New Relic One pricing without core users", - "User downgrade rules" + "Okta SCIM/SSO application configuration", + "Requirements", + "Step 1. Create authentication domain and enable SCIM", + "Step 2. Set up Okta's New Relic app", + "Step 3. Configure provisioning", + "Step 4. Assign users and groups", + "Assignments tab", + "Push groups tab", + "Step 5. Set your users' user type", + "Step 6. Assign access grants", + "Step 7. Configure SAML SSO", + "Additional considerations", + "Moving users between groups" ], - "title": "Billable user calculation and user downgrade rules", + "title": "Okta SCIM/SSO application configuration", "type": "docs", "tags": [ - "New Relic One pricing and billing", + "Automated user management", "Accounts and billing", "Accounts" ], - "external_id": "b173a862fbec8fef9d39427ae690c16c7cee3c14", - "image": "", - "url": "https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-pricing-billing/user-count-billing/", - "published_at": "2022-02-14T06:14:45Z", - "updated_at": "2022-02-14T06:14:45Z", + "external_id": "7a00399a6ce11aaa2cb52046f994a80f5986c0e4", + "image": "https://docs.newrelic.com/static/3f3318e1dc8c9049231c207a7b4e5c54/c1b63/okta-add-user-type-to-profile.png", + "url": "https://docs.newrelic.com/docs/accounts/accounts/automated-user-management/okta-scimsso-application-configuration/", + "published_at": "2022-02-14T07:41:30Z", + "updated_at": "2022-02-14T07:41:30Z", "document_type": "page", "popularity": 1, - "body": "For New Relic One pricing, the count of billable users is a billing factor. View user count in the UI For how to view your user count in the UI, see Billing-related UI. Pricing versions (with and without core users) We have two versions of the New Relic One pricing model: Our primary New Relic One pricing version, also known as \"New Relic One pricing with core users.\" It was released January 12, 2022. This version gives you access to a third user type: the core user. It also has different billing calculations and user downgrade rules. The \"New Relic One without core users\" version. If your New Relic organization existed prior to January 12, 2022, and hasn't switched to the newer version, this is your version. Organizations on this version can add only basic users and full platform users; they don't have access to core users. This version will be increasingly deprecated over time as customers on this version switch to the core user version. See the rules for this version. Tips for determining which version you're on: If your organization has core users or has the ability to add core users, you're on our primary version. If you can only add basic users and full platform users, you're on the \"New Relic One without core users\" version. 
You can view your users by going to the User management UI. Another way to determine your version is by going to the Manage your plan UI. If you see \"New Relic One - Users\", that means you're on the \"New Relic One without core users\" version. For more about the differences between these versions, see Core users release. User calculation for the primary pricing version (with core users) Important These rules apply for organizations on our primary New Relic One pricing version (the version with core users). You can use the usage UI to get an overview of your billable user count. If you need more detail than the UI provides, you can also run usage-related NRQL queries. To determine an organization's count of billable users in a calendar month, we count the users during that month who had a billable user type of either full platform user or core user. A user's billable user type is defined as the highest user type at which a user was set during a calendar month. We use the UTC timezone to define the start and end of a calendar month. For an example of how this works in practice: If a user is set as a full platform user at any point during a calendar month, their billable user type for that month is \"full platform user,\" and won't change, even if they downgrade later that month. This is the case even if that user is changed to a full platform user only briefly. If you're planning on adding billable users or changing your users' user type, you should keep these rules in mind. Some tips: If you want to add a billable user or upgrade a user, you might choose to do that at the beginning of the month. If you want to downgrade a user, you might choose to do that at the end of the month. The count of your unique users is determined by email address. If there are multiple user records in an organization that have the same email address, for billing purposes those user records would count as a single user, and that user's billable user type would be their highest user type assigned during that month. The cost of your billable users depends on an organization's pricing edition (Standard, Pro, or Enterprise) or on any custom deals you have with New Relic. When a New Relic organization first starts being billed, their billable user count is prorated based on when during the month they started. If an organization cancels their subscription, prorating is also applied for their last month. User downgrade rules for primary pricing version (with core users) Important This section applies only for organizations on our primary New Relic One pricing version (the version with core users). The rules pertaining to how many times you can downgrade full platform users differ depending on your usage plan: Pay-as-you-go: downgrade rules For the pay-as-you-go usage plan, there are no rules limiting the downgrading of users, but billing impacts may affect when you decide to upgrade or downgrade users. Annual pool of funds (longer term contract): downgrade rules Before upgrading or downgrading a user, ensure you understand the billing impacts. For the annual pool of funds plan, we have rules regarding how many times a user can be downgraded from being a full platform user to a lower user type. (There are no rules regarding how often users change between core user and basic user.)
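As a concrete illustration of the user calculation rules above, here's a small, hypothetical Python sketch of the highest-type-wins rule over a UTC calendar month. The event shape and type ranking are illustrative only, not a New Relic API.

# Hypothetical sketch of the billing rule above: a user's billable user type
# for a month is the highest type they held at any point in that UTC
# calendar month (users are keyed by email address).
from datetime import datetime, timezone

RANK = {"basic": 0, "core": 1, "full_platform": 2}  # higher = more capable

def billable_user_type(start_type, changes, year, month):
    """start_type: type entering the month; changes: (utc_datetime, type)."""
    highest = start_type
    for when, user_type in changes:
        in_month = when.year == year and when.month == month
        if in_month and RANK[user_type] > RANK[highest]:
            highest = user_type
    return highest

changes = [
    (datetime(2022, 2, 3, tzinfo=timezone.utc), "full_platform"),  # brief upgrade
    (datetime(2022, 2, 4, tzinfo=timezone.utc), "basic"),          # same-month downgrade
]
# Billed as a full platform user for February despite the downgrade:
print(billable_user_type("basic", changes, 2022, 2))  # -> full_platform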
During a New Relic organization's contract year (defined below), if a full platform user is changed to a lower user type and back to a full platform user twice, that user will be billed as a full platform user for the remainder of that contract year, regardless of user type adjustments. A contract year is defined as a year starting at your contract's starting point, or at the anniversary of that point. If your organization started out on a different pricing plan and switched to this version of pricing, the user type downgrade rule will apply from when you opted in until the renewal of your subscription term or, if applicable, the annual anniversary date of your commitment term, whichever is earlier. Rules for New Relic One pricing without core users The following rules apply only for organizations on the version of New Relic One pricing without core users: How billable users are determined. For a calendar month, an organization is billed based on a calculation of the number of full platform users for that month. Prorating in first and last month. The count of billable users is prorated based on when a New Relic organization starts their subscription, or based on when a user becomes a full platform user (added as a full platform user or converted to one). Users are billable when provisioned. A user counts as billable the moment they are set to a billable user type in New Relic. This applies regardless of whether that user has ever logged into or used New Relic. User count based on email address. If there are multiple user records in an organization that have the same email address, for billing purposes that would count as a single user. Caveat for our original user model. If your organization has users on our original user model: If a user is set as a basic user in one account in the organization and as a billable user in another account, the billable user status takes precedence. For user downgrade rules, see below: User downgrade rules The following user downgrade rules apply for organizations on the version of New Relic One pricing without core users: User type is meant to be a fairly long-term setting based on a user's expected New Relic duties and responsibilities. For that reason, a full platform user may only be downgraded a maximum of two times in a 12-month period. If a user's user type is changed more than this allowed number of times, New Relic can charge that user as a full platform user.", + "body": "Our automated user management allows you to import and configure your New Relic users from your identity provider via SCIM. This guide provides Okta-specific details on how to configure the New Relic Okta SCIM/SSO application. Requirements Before using this guide, read the automated user management requirements. Note that these instructions require going back and forth between your identity provider and New Relic. Step 1. Create authentication domain and enable SCIM To get to the New Relic authentication domain UI: From one.newrelic.com, click the account dropdown, click Organization and access, and then click Authentication domains. If you don't already have one, create a new authentication domain for your SCIM-provisioned users by clicking + Add new. For that authentication domain, under Source of users, select SCIM. Copy and save the API token for later use. Note that this will be shown only once. Step 2. Set up Okta's New Relic app Next, set up Okta's New Relic SCIM/SSO application: Go to okta.com/ and sign in with an account that has administrator permissions.
From the Okta home page, click on Admin. From the Okta admin Dashboard, choose the Applications page. Click Browse app catalog and search for \"New Relic by organization\" (not \"New Relic by account\") and choose that from the results. From the New Relic by Organization page, click on Add. From the Add New Relic by organization page, check the two Application visibility \"Do not display...\" checkboxes and click on Done. We will make the application visible later after configuration is complete and provisioning has begun. Step 3. Configure provisioning Configure Okta's New Relic SCIM/SSO application to automatically provision your users to New Relic: From the app, click on the Provisioning tab. From the Integration form, click on Configure API integration and check the Enable API integration checkbox. Take the API token you saved in Step 1 and input it in the Okta New Relic app's API token field. Optional: click on Test API credentials to verify a SCIM connection can be established to New Relic. If a connection can be established, a success message is displayed. If a connection was not established, re-enter the API Token and try the test again. Click Save. Note that the save process does a test of the API credentials. If a connection is not established to New Relic, the save will fail. On the newly displayed To App form, click on Edit. Check the Enable checkbox in the Create users, Update user attributes, and Deactivate users sections. Click Save. Go to the Sign on tab. In the authentication domain field, input your authentication domain ID, which you'll find in New Relic's authentication domain UI. Step 4. Assign users and groups If you don't already have your user groups set up in Okta, you'll need to create them. These will be the groups that you'll later assign access grants to in New Relic, which will be what gives those groups access to specific roles on specific accounts. To learn how to create groups, see Okta's documentation on groups. Assignments tab Next, you'll assign users. Assigning users is done using two different tabs in the app. We recommend having your New Relic users selected on the Assignments tab and their associated groups selected on the Push groups tab. In the app, click on the Assignments tab. From the Assignments form, click on Assign. From the pop up menu, click on Assign to groups. From the Assign ... to groups form, click on Assign for the group you wish to assign to the application. Highly recommended: Configure the time zone for your users in Okta. That will determine how dates/times for your users are displayed in New Relic. If you don't set a time zone, we use UTC time for those users unless they've set their own time zone. Time zone is specified in IANA Time Zone database format, also known as the \"Olson\" time zone database format (for example, \"America/Los_Angeles\"). There are several ways in Okta to configure time zone settings, so consult the Okta docs if more detail is needed. Here's one way to do this in the Assignments tab: In the Time zone field, enter the default time zone for members of the group. Click on Save and go back. Repeat the steps to add a group until all desired groups have been assigned to the application. Click Done. Push groups tab In the app, click on the Push groups tab. From the Push groups form, click on Push groups. From the pop up menu, click on Find groups by name. From the Push groups to... form, in the search field enter the first few characters of the name of the group you want to send to New Relic. 
Leave the Push group memberships immediately checkbox checked. Click on your group in the pop up search results list. In the Match result & push action section, No match found should be displayed, meaning that the group does not yet exist at New Relic. Leave the selector set to Create group and leave the default name for the group. The intent here is to have a group of the same name created at New Relic. If this is the last group you wish to send to New Relic, click on Save. Otherwise, if you have more groups to configure, click on Save & add another and repeat the steps to add a group. When you've added one or more groups, you should be able to see the users you've added by going to the User management UI page. Step 5. Set your users' user type When your users are provisioned in New Relic, you're able to see them in the User management UI. If you're adding users to New Relic via SCIM but not managing their user type via SCIM, they start out as basic users. To upgrade users, you have two options: Use the User management UI to edit users. Manage user type from Okta (described below). To manage your users' user type from Okta: Go to the New Relic authentication domain UI and click Enable Manage user type with SCIM. Note that when this is enabled, you can’t manage user type from the New Relic UI and can only manage it from Okta. Go into your Okta instance. The rest of these instructions are done from Okta. Next, you'll configure Okta to be able to send a new attribute nrUserType. Steps: Go to the Profile editor. In the Attributes section, click Add attribute. Set your settings to match the screenshot below. The only two fields that must match exactly are External name (value: nrUserType) and External namespace (value: urn:ietf:params:scim:schemas:extension:newrelic:2.0:User). The variable value can be any value. Next, you'll configure your Okta user profile to have this field. Steps: In the Profile editor, go to Users and click the User (default) profile. Add a new New Relic user type attribute to that profile (see Okta user profile instructions). How you set this will depend on your own setup and preferences for defining user type. Note that the expected values for user type are Basic user, Core user, and Full user. Below is an example with information filled in. In the People section, define the user type for your users. How you do this will depend on your setup and preferences. For example, you may choose to set this manually by setting each user’s user type, or you may use Okta to manage these in bulk. Next, you’ll set up mapping for that attribute. Steps: In the app's Provisioning section, click Unmapped attributes. Go into edit mode for the unmapped New Relic user type attribute. Configure it based on how you want to set the user type. To learn about choosing user type, see User type. Learn more about Okta attribute mappings. Step 6. Assign access grants Once these steps are completed, you're able to see your users in New Relic by going to the User management UI. Now that your users are present in New Relic, you must grant them access to specific roles on specific accounts. If this is not done, your users don't yet have access to New Relic. To learn how to do this, see: How access grants work The user management tutorial Step 7. Configure SAML SSO To enable SAML SSO, see the SAML instructions. Additional considerations In this section we discuss other important things to know when using the New Relic SCIM/SSO application. 
This section includes tips to work around potential issues that could cause undesired results when integrating between Okta and New Relic. Moving users between groups When moving a user between groups, you must manually synchronize the old group's membership with New Relic. This is because Okta does not send a SCIM request to remove a user from a group. So, the admin needs to push the old group's membership to New Relic manually to inform New Relic that the user is no longer a member of the old group. Here are the steps to manually synchronize a group's membership: From the New Relic SCIM/SSO application page, click on the Push groups tab. From the Push groups form, open the pick list on the desired group's button under the Push Status column. From the displayed pick list on the button, click Push now. This causes an immediate synchronization of the group's membership with New Relic.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 2.2119207, + "_score": 2.0980961, "_version": null, "_explanation": null, "sort": null, - "id": "61e2e3eb196a678b9a02b837", + "id": "6043f5cae7b9d2758b579a0c", "highlight": {} }, { "sections": [ - "NerdGraph tutorial: APM agent configuration examples", - "Configure server-side configuration", - "Retrieve settings" - ], - "title": "NerdGraph tutorial: APM agent configuration examples", + "Create NRQL alert conditions", + "Tip", + "Create a NRQL alert condition", + "Create a condition from a chart", + "Caution", + "NRQL alert syntax", + "Important", + "Reformatting incompatible NRQL", + "NRQL alert threshold examples", + "Alert on specific segments of your data", + "Alert on Nth percentile of your data", + "Alert on max, min, avg of your data", + "Alert on a percentage of your data", + "Alert on Apdex with any T-value", + "NRQL conditions and query order of operations", + "Example: null value returned", + "Example: zero value returned", + "Nested aggregation NRQL alerts", + "Nested queries with a non-faceted innermost query are not currently supported", + "Queries at all levels must have the same aggregation window size", + "Signal loss is not yet supported for nested queries", + "Nested queries containing 'WITH METRIC_FORMAT' in the inner query are not currently supported", + "NRQL condition creation tips", + "Condition edits can reset condition evaluation", + "Alert condition types", + "Sum of query results (limited or intermittent data)", + "Set the loss of signal threshold", + "Advanced signal settings", + "Aggregation window duration", + "Sliding window aggregation", + "Streaming method", + "Delay/timer", + "Fill data gaps" + ], + "title": "Create NRQL alert conditions", "type": "docs", "tags": [ - "Examples", - "NerdGraph", - "APIs" + "Alert conditions", + "New Relic Alerts", + "Alerts and Applied Intelligence" ], - "external_id": "219a704e974c1c3cb1223fc90e50dcdf09709587", - "image": "", - "url": "https://docs.newrelic.com/docs/apis/nerdgraph/examples/apm-config-nerdgraph/", - "published_at": "2022-02-14T06:02:53Z", - "updated_at": "2022-02-14T06:02:53Z", + "external_id": "2872f770a5fc8a802f99b9f848906f7e351ad761", + "image": "https://docs.newrelic.com/static/eb8e1b2d826f9fd9cf46fe3dd5455217/c1b63/nr1_nrql_alert_conditions.png", + "url": "https://docs.newrelic.com/docs/alerts-applied-intelligence/new-relic-alerts/alert-conditions/create-nrql-alert-conditions/", + "published_at": "2022-02-14T06:58:30Z", + "updated_at": "2022-02-14T06:58:29Z", "document_type": "page", "popularity": 1, - "body": "This doc is 
a place for examples of configuring APM agents using our NerdGraph API. Configure server-side configuration Note that for APM agents to use configuration values changed via NerdGraph, server-side configuration must be enabled. For requirements, see server-side config requirements. Here's an example query returning the status of the server-side configuration setting for a given entity. query ExampleReadQuery { actor { entity(guid:\"ZjY1ODgxfEFQTXxBUFBYSUNBVElPTnz0ODEwMTY3NzZ\") { ...on ApmApplicationEntity { apmSettings { apmConfig { useServerSideConfig } } } } } } Copy Here's an example of disabling server-side configuration. Note that settings uses an array, which may be helpful if you want to update multiple entities. mutation ExampleUpdateQuery { agentApplicationSettingsUpdate(guid: \"ZjY1ODgxfEFQTXxBUFBYSUNBVElPTnz0ODEwMTY3NzZ\", settings: { apmConfig: { useServerSideConfig: false } }) { apmSettings { apmConfig { useServerSideConfig } } errors { description } } } Copy For how to find an entity's GUID, see Find entity data. Retrieve settings Here's an example of returning an entity's transaction tracer settings: query ExampleReadQuery { actor { entity(guid:\"ZjY1ODgxfEFQTXxBUFBYSUNBVElPTnz0ODEwMTY3NzZ\") { ... on ApmApplicationEntity { guid name apmSettings { transactionTracer { enabled explainEnabled explainThresholdType explainThresholdValue } } } } } } Copy", + "body": "You can use NRQL queries to create alert conditions. Once you've defined your signal, you can further define your warning and critical threshold levels. This determines when an alert violation is created. Read on to learn more about how to do this. Go to one.newrelic.com, click Alerts & AI, in the left sidebar click Policies, select a policy, then Add a condition. Click NRQL, and then Next, define thresholds. Tip For more information on key concepts relating to NRQL alert conditions and streaming alerts, see Streaming alerts: key terms and concepts. Create a NRQL alert condition To create a NRQL alert condition for a policy: On one.newrelic.com, in the header click Alerts & AI, then in the left sidebar click Policies. Select an existing policy or click New alert policy to create a new policy. Click Add a condition. Under Select a product click NRQL, and then click Next, define thresholds. Note that editing an existing condition can result in resetting its evaluation. Create a condition from a chart You can use a chart to create a NRQL alert condition. To create a NRQL alert condition from a chart, click the chart menu, then click Create alert condition. Once you've named and customized your condition, you can add it to an existing policy or create a new one. Caution A small number of our older charts don't include the option to create an alert condition. NRQL alert syntax Here's the basic syntax for creating all NRQL alert conditions. The FACET clause is required for outlier condition types. It's optional for static and baseline. SELECT function(attribute) FROM Event WHERE attribute [comparison] [AND|OR ...] Copy Clause Notes SELECT function(attribute) Required Supported functions that return numbers include: apdex average count latest max min percentage percentile sum uniqueCount Tip If you use the percentile aggregator in a faceted alert condition with many facets, this may cause the following error to appear: An error occurred while fetching chart data. If you see this error, use average instead. FROM data type Required Only one data type can be targeted.
Supported data types: Event Metric (RAW data points will be returned) WHERE attribute [comparison] [AND|OR ...] Use the WHERE clause to specify a series of one or more conditions. All the operators are supported. FACET attribute Required for outlier conditions Include an optional FACET clause in your NRQL syntax depending on the threshold type: static or baseline. Use the FACET clause to separate your results by attribute and alert on each attribute independently. No LIMIT clause is allowed, but all queries will receive the maximum number of facets possible. Faceted queries can return a maximum of 5000 values for static and baseline conditions and a maximum of 500 values for outlier conditions. Important If the query returns more than the maximum number of values, the alert condition can't be created. If you create the condition and the query returns more than this number later, the alert will fail. Modify your query so that it returns a fewer number of values. Reformatting incompatible NRQL Some elements of NRQL used in charts don’t make sense in the streaming context of alerts. Here’s a list of the most common incompatible elements and suggestions for reformatting a NRQL alert query to achieve the same effect. Element Notes SINCE and UNTIL Example: SELECT percentile(largestContentfulPaint, 75) FROM PageViewTiming WHERE (appId = 837807) SINCE yesterday Copy NRQL conditions produce a never-ending stream of windowed query results, so the SINCE and UNTIL keywords to scope the query to a point in time are not compatible. As a convenience, we automatically strip SINCE and UNTIL from a query when creating a condition from the context of a chart. TIMESERIES In NRQL queries, the TIMESERIES clause is used to return data as a time series broken out by a specified period of time. For NRQL conditions, the equivalent property of a signal is the aggregation duration window. histogram() The histogram() aggregation function is used to generate histograms. histogram() is not compatible with NRQL alerting: histogram aggregations can not be formatted as a time series. To create an alert from a portion of a histogram (for example, 95th percentile), use the percentile() aggregation function. Multiple aggregation functions Each condition can only target a single aggregated value. To alert on multiple values simultaneously, you’ll need to decompose them into individual conditions within the same policy. Original Query: SELECT count(foo), average(bar), max(baz) from Transaction Copy Decomposed: SELECT count(foo) from Transaction SELECT average(bar) from Transaction SELECT max(baz) from Transaction Copy COMPARE WITH The COMPARE WITH clause is used to compare the values for two different time ranges. This type of query is incompatible with NRQL alerting. We recommend using a Baseline Alert Condition to dynamically detect deviations for a particular signal. SLIDE BY The SLIDE BY clause supports a feature known as sliding windows. With sliding windows, SLIDE BY data is gathered into \"windows\" of time that overlap with each other. These windows can help to smooth out line graphs with a lot of variation in cases where the rolling aggregate (such as a rolling mean) is more important than aggregates from narrow windows of time. You can enable sliding windows in the UI. When creating or editing a condition, go to Fine-tune advanced signal settings > Data aggregation settings > Use sliding window aggregation. 
LIMIT In NRQL queries, the LIMIT clause is used to control the amount of data a query returns, either the maximum number of facet values returned by FACET queries or the maximum number of items returned by SELECT * queries. LIMIT is not compatible with NRQL alerting: evaluation is always performed on the full result set. NRQL alert threshold examples Here are some common use cases for NRQL conditions. These queries will work for static and baseline condition types. The outlier condition type will require additional FACET clauses. Alert on specific segments of your data Create constrained alerts that target a specific segment of your data, such as a few key customers or a range of data. Use the WHERE clause to define those conditions. SELECT average(duration) FROM Transaction WHERE account_id in (91290, 102021, 20230) Copy SELECT percentile(duration, 95) FROM Transaction WHERE name LIKE 'Controller/checkout/%' Copy Alert on Nth percentile of your data Create alerts when an Nth percentile of your data hits a specified threshold; for example, maintaining SLA service levels. Since we evaluate the NRQL query based on the aggregation window duration, percentiles will be calculated for each duration separately. SELECT percentile(duration, 95) FROM Transaction Copy SELECT percentile(databaseDuration, 75) FROM Transaction Copy Alert on max, min, avg of your data Create alerts when your data hits a certain maximum, minimum, or average; for example, ensuring that a duration or response time does not pass a certain threshold. SELECT max(duration) FROM Transaction Copy SELECT average(duration) FROM Transaction Copy Alert on a percentage of your data Create alerts when a proportion of your data goes above or below a certain threshold. SELECT percentage(count(*), WHERE duration > 2) FROM Transaction Copy SELECT percentage(count(*), WHERE httpResponseCode = '500') FROM Transaction Copy Alert on Apdex with any T-value Create alerts on Apdex, applying your own T-value for certain transactions. For example, get an alert notification when your Apdex for a T-value of 500ms on transactions for production apps goes below 0.8. SELECT apdex(duration, t:0.5) FROM Transaction WHERE appName like '%prod%' Copy NRQL conditions and query order of operations By default, the aggregation window duration is 1 minute, but you can change the window to suit your needs. Whatever the aggregation window, New Relic will collect data for that window using the function in the NRQL condition’s query. The query is parsed and executed by our systems in the following order: FROM clause – which event type needs to be grabbed? WHERE clause – what can be filtered out? SELECT clause – what information needs to be returned from the now-filtered data set? Example: null value returned Let's say this is your alert condition query: SELECT count(*) FROM SyntheticCheck WHERE monitorName = 'My Cool Monitor' AND result = 'FAILURE' Copy If there are no failures for the aggregation window: The system will execute the FROM clause by grabbing all SyntheticCheck events on your account. Then it will execute the WHERE clause to filter through those events by looking only for the ones that match the monitor name and result specified. If there are still events left to scan through after completing the FROM and WHERE operations, the SELECT clause will be executed. If there are no remaining events, the SELECT clause will not be executed. This means that aggregators like count() and uniqueCount() will never return a zero value. 
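To make that order of operations concrete, here's a small, hypothetical Python simulation of how an empty result set yields NULL rather than 0. The events and pipeline below are illustrative only, not New Relic internals.

# Hypothetical simulation of the FROM -> WHERE -> SELECT order of operations
# described above. With no matching events, SELECT never runs, so the window
# yields None (NULL) rather than 0.
events = [
    {"eventType": "SyntheticCheck", "monitorName": "My Cool Monitor", "result": "SUCCESS"},
    {"eventType": "SyntheticCheck", "monitorName": "Other Monitor", "result": "FAILURE"},
]

def evaluate(events):
    scanned = [e for e in events if e["eventType"] == "SyntheticCheck"]  # FROM
    matched = [e for e in scanned
               if e["monitorName"] == "My Cool Monitor"
               and e["result"] == "FAILURE"]                             # WHERE
    if not matched:       # nothing left to scan: SELECT is skipped entirely
        return None       # NULL, not 0
    return len(matched)   # SELECT count(*)

print(evaluate(events))  # -> None: no data point is produced for this window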
When there is a count of 0, the SELECT clause is ignored and no data is returned, resulting in a value of NULL. Example: zero value returned If you have a data source delivering legitimate numeric zeroes, the query will return zero values and not null values. Let's say this is your alert condition query, and that MyCoolAttribute is an attribute that can sometimes have a zero value. SELECT average(MyCoolAttribute) FROM MyCoolEvent Copy If, in the aggregation window being evaluated, there's at least one instance of MyCoolEvent and if the average value of all MyCoolAttribute attributes from that window is equal to zero, then a 0 value will be returned. If there are no MyCoolEvent events during that minute, then a NULL will be returned due to the order of operations. Tip For more information about this topic, you can check out our blog post on troubleshooting for zero versus null values. Tip You can determine how null values will be handled by adjusting loss of signal and gap filling settings in the Alert Conditions UI. Tip You can avoid NULL values entirely with a query order of operations shortcut. Do this by using a filter sub-clause, then including all filter elements within that sub-clause. The main body of the query will run and return data, at which point the SELECT clause will then run and apply the filter elements. The query will return a value of 0 if the filter elements result in no matching data. Here's an example: SELECT filter(count(*), WHERE result = 'SUCCESS' AND monitorName = 'My Favorite Monitor') FROM SyntheticCheck Copy Nested aggregation NRQL alerts Nested aggregation queries are a powerful way to query your data. However, they have a few restrictions that are important to note. Nested queries with a non-faceted innermost query are not currently supported Without a FACET, the inner query produces a single result, giving the outer query nothing to aggregate. If you're using a nested query, make sure your inner query is faceted. SELECT max(cpu) FROM (FROM Event SELECT min(cpuTime) as cpu) Copy Queries at all levels must have the same aggregation window size With an alert aggregation window of 1 minute, the inner query would produce two smaller windows of 30 seconds. In theory, these two windows could be aggregated by the outer query. However, this is not currently supported. SELECT max(cpu) FROM (FROM Event SELECT min(cpuTime) as cpu TIMESERIES 30 seconds) Copy Signal loss is not yet supported for nested queries For more information on signal loss, see NerdGraph API: Loss of signal and gap filling. Nested queries containing 'WITH METRIC_FORMAT' in the inner query are not currently supported You can't use a nested query containing the WITH METRIC_FORMAT clause in the inner query to create NRQL alert conditions. NRQL condition creation tips Here are some tips for creating and using a NRQL condition: Topic Tips Condition types NRQL condition types include static, baseline, and outlier. Create a description For NRQL conditions, you can create a custom description to add to each violation. Descriptions can be enhanced with variable substitution based on metadata in the specific violation. For details, see Description. Query results Queries must return a number. The condition evaluates the returned number against the thresholds you've set. Time period NRQL conditions evaluate data based on how it's aggregated, using aggregation windows from 30 seconds to 120 minutes, in increments of 15 seconds. For best results, we recommend using the event flow or event timer aggregation methods.
For the cadence aggregation method, the implicit SINCE ... UNTIL clause specifying which minute to evaluate is controlled by your delay/timer setting. Since very recent data may be incomplete, you may want to query data from 3 minutes ago or longer, especially for: Applications that run on multiple hosts. SyntheticCheck data: Timeouts can take 3 minutes, so 5 minutes or more is recommended. Also, if a query will generate intermittent data, consider using the sum of query results option. Lost signal threshold (loss of signal detection) You can use loss of signal detection to alert on when your data (a telemetry signal) should be considered lost. A signal loss can indicate that a service or entity is no longer online or that a periodic job failed to run. You can also use this to make sure that violations for sporadic data, such as error counts, are closed when no signal is coming in. Advanced signal settings These settings give you options for better handling continuous, streaming data signals that may sometimes be missing. These settings include the aggregation window duration, the delay/timer, and an option for filling data gaps. For more on using these, see Advanced signal settings. Condition settings Use the Condition settings to: Create a concise, descriptive condition name. Provide a custom violation description for the condition that will be included in violations and notifications. Add the runbook URL to include your organization's procedures for handling incidents. You may also add this information to the custom violation description. Limits on conditions See the maximum values. Health status In order for a NRQL alert condition health status display to function properly, use a FACET clause to scope each signal to a single entity (for example, FACET hostname or FACET appname). Examples For more information, see: Expected NRQL syntax Examples of NRQL condition queries Condition edits can reset condition evaluation When you edit NRQL alert conditions in some specific ways (detailed below), their evaluations are reset, meaning that any evaluation up until that point is lost, and the evaluation starts over from that point. The two ways this will affect you are: For \"for at least x minutes\" thresholds: because the evaluation window has been reset, there will be a delay of at least x minutes before any violations can be reported. For baseline conditions: the condition starts over again and all baseline learning is lost. The following actions cause an evaluation reset for NRQL conditions: Changing the query Changing the aggregation window, aggregation method, or aggregation delay/timer setting Changing the \"close violations on signal loss\" setting Changing any gap fill settings Changing the baseline direction (if applicable) – higher, lower, or higher/lower Changing the threshold value, threshold window, or threshold operator The following actions (along with any other actions not covered in the above list) will not reset the evaluation: Changing the loss of signal time window (expiration duration) Changing the time function (switching \"for at least\" to \"at least once in,\" or vice-versa) Toggling the \"open violation on signal loss\" setting Alert condition types When you create a NRQL alert, you can choose from different types of conditions: NRQL alert condition types Description Static This is the simplest type of NRQL condition. It allows you to create a condition based on a NRQL query that returns a numeric value. Optional: Include a FACET clause.
Baseline (Dynamic) Uses a self-adjusting condition based on the past behavior of the monitored values. Uses the same NRQL query form as the static type, including the optional FACET clause. Outlier Looks for group behavior and values that are outliers from those groups. Uses the same NRQL query form as the static type, but requires a FACET clause. Sum of query results (limited or intermittent data) Important Available only for static (basic) condition types. If a query returns intermittent or limited data, it may be difficult to set a meaningful threshold. Missing or limited data will sometimes generate false positives or false negatives. You can use loss of signal, aggregation duration, and gap filling settings to minimize these false notifications. To avoid this problem when using the static threshold type, you can set the selector to sum of query results. This lets you set the alert on an aggregated sum instead of a value from a single harvest cycle. Up to two hours of one-minute data checks can be aggregated. The duration you select determines the width of the rolling sum and the preview chart will update accordingly. Set the loss of signal threshold Loss of signal occurs when no data matches the NRQL condition over a specific period of time. You can set your loss of signal threshold duration and also what happens when the threshold is crossed. Go to one.newrelic.com, click Alerts & AI, in the left sidebar click Policies, select a policy, then Add a condition. Loss of signal is only available for NRQL conditions. You may also manage these settings using the GraphQL API (recommended), or the REST API. Go here for specific GraphQL API examples. Loss of signal settings: Loss of signal settings include a time duration and two possible actions. Signal loss expiration time UI label: Signal is lost after: GraphQL Node: expiration.expirationDuration Expiration duration is a timer that starts and resets when we receive a data point in the streaming alerts pipeline. If we don't receive another data point before your 'expiration time' expires, we consider that signal to be lost. This can be because no data is being sent to New Relic or the WHERE clause of your NRQL query is filtering that data out before it is streamed to the alerts pipeline. Note that when you have a faceted query, each facet is a signal. So if any one of those signals ends during the duration specified, that will be considered a loss of signal. The loss of signal expiration time is independent of the threshold duration and triggers as soon as the timer expires. The maximum expiration duration is 48 hours. This is helpful when monitoring for the execution of infrequent jobs. The minimum is 30 seconds, but we recommend using at least 3-5 minutes. Loss of signal actions Once a signal is considered lost, you can close open violations, open new violations, or both. Close all current open violations: This closes all open violations that are related to a specific signal. It won't necessarily close all violations for a condition. If you're alerting on an ephemeral service, or on a sporadic signal, you'll want to choose this action to ensure that violations are closed properly. The GraphQL node name for this is \"closeViolationsOnExpiration\" Open new violations: This will open a new violation when the signal is considered lost. These violations will indicate that they are due to a loss of signal. Based on your incident preferences, this should trigger a notification. 
The GraphQL node name for this is \"openViolationOnExpiration\" When you enable both actions, we'll close all open violations first, and then open a new violation for loss of signal. To create a NRQL alert configured with loss of signal detection in the UI: For a policy, when you create a condition, under Select a product, click NRQL, then click Next, define thresholds. Write a NRQL query that returns the values you want to alert on. For Threshold type, select Static or Baseline. Click + Add lost signal threshold, then set the signal expiration duration time in minutes or seconds in the Signal is lost after field. Choose what you want to happen when the signal is lost. You can check one or both of Close all current open violations and Open new \"lost signal\" violation. These control how loss of signal violations will be handled for the condition. Make sure you name your condition before you save it. Violations open due to loss of signal close when: the signal comes back (newly opened lost signal violations will close immediately when new data is evaluated); the condition they belong to expires (by default, conditions expire after 3 days); or you manually close the violation with the Close all current open violations option. Tip Loss of signal detection doesn't work on NRQL queries that use nested aggregation or sub-queries. Advanced signal settings When creating a NRQL alert condition, use the advanced signal settings to control streaming alert data and avoid false alarms. When creating a NRQL condition, there are several advanced signal settings: Aggregation window duration Sliding window aggregation Streaming method Delay/timer Fill data gaps To read an explanation of what these settings are and how they relate to each other, see Streaming alerts concepts. Below are instructions and tips on how to configure them. Aggregation window duration You can set the aggregation window duration to choose how long data is accumulated in a streaming time window before it's aggregated. You can set it to anything between 30 seconds and 120 minutes. The default is one minute. Sliding window aggregation You can use sliding windows to create smoother charts. This is done by creating overlapping windows of data. Once enabled, set the slide by duration to control how much overlap time your aggregated windows have. The slide by duration must be shorter than the aggregation window duration, and must divide evenly into it. Important Immediately after you create a new sliding windows alert condition or perform any action that can cause an evaluation reset, your condition will need time to build up an \"aggregated buffer\" for the duration of the first aggregation window. During that time, no violations will trigger. Once that single aggregation window has passed, a complete \"buffer\" will have been built and the condition will function normally. Streaming method Choose between three streaming aggregation methods to get the best evaluation results for your conditions. Delay/timer You can adjust the delay/timer to coordinate our streaming alerting algorithm with your data's behavior. If your data is sparse or inconsistent, you may want to use the event timer aggregation method. For the cadence method, the total supported latency is the sum of the aggregation window duration and the delay. If the data type comes from an APM language agent and is aggregated from many app instances (for example, Transactions, TransactionErrors, etc.), we recommend using the event flow method with the default settings.
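Pulling the loss-of-signal and advanced signal settings together, here's a hedged Python sketch of creating a static NRQL condition through NerdGraph. The fields under signal and expiration follow the GraphQL nodes named in this doc (expirationDuration, closeViolationsOnExpiration, openViolationOnExpiration, fillOption/fillValue), but the exact mutation shape, alertsNrqlConditionStaticCreate and its arguments, is an assumption to confirm in the NerdGraph explorer.

# Sketch: create a static NRQL condition with loss-of-signal and gap-fill
# settings via NerdGraph. Mutation and argument names are assumptions based
# on the GraphQL nodes mentioned above; verify them in the NerdGraph explorer.
import requests

MUTATION = """
mutation($accountId: Int!, $policyId: ID!) {
  alertsNrqlConditionStaticCreate(accountId: $accountId, policyId: $policyId, condition: {
    name: "High error count"
    enabled: true
    nrql: { query: "SELECT count(*) FROM TransactionError WHERE appName = 'my-app'" }
    terms: [{
      operator: ABOVE, priority: CRITICAL,
      threshold: 10, thresholdDuration: 300, thresholdOccurrences: ALL
    }]
    signal: {
      aggregationWindow: 60          # seconds of data per streaming window
      aggregationMethod: EVENT_FLOW  # recommended for steady APM data
      aggregationDelay: 120
      fillOption: STATIC             # fill empty aggregation windows...
      fillValue: 0                   # ...with a custom static value
    }
    expiration: {
      expirationDuration: 600            # signal is lost after 10 minutes
      closeViolationsOnExpiration: true
      openViolationOnExpiration: true
    }
  }) { id name }
}
"""

resp = requests.post(
    "https://api.newrelic.com/graphql",
    json={"query": MUTATION, "variables": {"accountId": 1234567, "policyId": "7654321"}},
    headers={"API-Key": "YOUR_USER_API_KEY", "Content-Type": "application/json"},
)
resp.raise_for_status()
print(resp.json())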
Important When creating NRQL conditions for data collected from Infrastructure Cloud Integrations such as AWS Cloudwatch or Azure, we recommend that you use the event timer method. Fill data gaps Gap filling lets you customize the values to use when your signals don't have any data. You can fill gaps in your data streams with one of these settings: None: (Default) Choose this if you don't want to take any action on empty aggregation windows. On evaluation, an empty aggregation window will reset the threshold duration timer. For example, if a condition says that all aggregation windows must have data points above the threshold for 5 minutes, and 1 of the 5 aggregation windows is empty, then the condition won't be in violation. Custom static value: Choose this if you'd like to insert a custom static value into the empty aggregation windows before they're evaluated. This option has an additional, required parameter of fillValue (as named in the API) that specifies what static value should be used. This defaults to 0. Last known value: This option inserts the last seen value before evaluation occurs. We maintain the state of the last seen value for 2 hours. Tip The alerts system fills gaps in actively reported signals. This signal history is dropped after 2 hours of inactivity. For gap filling, data points received after this period of inactivity are treated as new signals. To learn more about signal loss, gap filling, and how to request access to these features, see this Explorers Hub post. Options for editing data gap settings: In the NRQL conditions UI, go to Condition settings > Advanced signal settings > fill data gaps with and choose an option. If using our Nerdgraph API (preferred), this node is located at: actor : account : alerts : nrqlCondition : signal : fillOption | fillValue NerdGraph is our recommended API for this but if you're using our REST API, you can find this setting in the REST API explorer under the \"signal\" section of the Alert NRQL conditions API.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 2.2108202, + "_score": 2.0948277, "_version": null, "_explanation": null, "sort": null, - "id": "61c5c9b928ccbcbd9607c57d", + "id": "603ef04864441fbc114e8883", "highlight": {} }, { "sections": [ - "Set your time zone", - "Change your default time zone", - "Exceptions where your time zone doesn't apply" + "User type: basic, core, and full platform users", + "What's the \"user type\"?", + "Overview of user type capabilities", + "Important", + "Detailed capability comparison table", + "Applied intelligence capabilities", + "Log management capabilities", + "How to pick a user type", + "Manage user type and upgrade requests", + "Change to \"full user\" language", + "Lacking access to something?" 
], - "title": "Set your time zone", + "title": "User type: basic, core, and full platform users", "type": "docs", "tags": [ - "General account settings", - "Accounts and billing" + "New Relic One user management", + "Accounts and billing", + "Accounts" ], - "external_id": "3a7abaee77b5d140836c96007766fa8eb9109b6b", + "external_id": "9c5f86e8314ea896333ac36452994c7ee712e7c5", "image": "", - "url": "https://docs.newrelic.com/docs/accounts/accounts-billing/general-account-settings/default-time-zone-setting/", - "published_at": "2022-02-14T05:56:22Z", - "updated_at": "2022-02-14T05:56:21Z", + "url": "https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-user-management/user-type/", + "published_at": "2022-02-14T06:37:32Z", + "updated_at": "2022-02-14T06:37:32Z", "document_type": "page", "popularity": 1, - "body": "Your personal time zone setting controls most time-related settings in the New Relic UI, with a few exceptions. Change your default time zone To change your default time zone for your New Relic account: Go to one.newrelic.com. Select the account dropdown, then select User preferences. Change your time zone. Optional: If you're a user managed via automated user management, you can either use your identity provider's time zone or override that. When you change your time zone, this can take up to 24 hours to be reflected in the UI. Exceptions where your time zone doesn't apply Some New Relic features do not rely on the User preferences time zone settings. The following use Coordinated Universal Time (UTC) and aren't affected by user preferences: Alerts REST API v2 There may be other features where the time zone doesn't rely on your default time zone settings.", + "body": "In this doc you'll learn how we define user type, what capabilities each user type has, and how to decide on a user type. Want to learn about how users are calculated for billing purposes? See New Relic One pricing. What's the \"user type\"? A user’s user type is what determines the set of New Relic capabilities a user can theoretically access. In practice, users will often have roles assigned to them that limit their capabilities in various ways, but the user type represents their maximum theoretical set of capabilities. There are three user types: Basic user: several basic but powerful New Relic platform capabilities. Core user (limited availability): has more capabilities than a basic user. Full platform user: all capabilities. To skip to learning more about capabilities, see Capabilities. To learn more about the user type concept, keep reading. On the New Relic One pricing model, basic users are free, and core users and full platform users are billable. Your user count is tracked in the usage UI. Because user count is a billing factor, it's important to understand how user billing calculation works. If you're tasked with adding New Relic users, one of the key decisions to make is what user type to make them. If you're not sure at first, you can add them as basic users and later decide which users you want to upgrade. For how to adjust user type, see Manage user type. The user type is meant to be a fairly long-term setting based on a user's expected New Relic responsibilities over the next several months or longer. That intention is reflected in our billing calculations and downgrade rules. For more frequent or more granular adjustments to a user's New Relic access, you can assign them to more restrictive roles. 
Overview of user type capabilities Important Basic users have free preview access to core user capabilities starting January 12, 2022 and ending February 28, 2022. Here's an overview of the capabilities of each user type: Basic user Core user (limited availability) Full platform user Basic users are free and you can have an unlimited number of them. They can set up New Relic observability tools, run queries of your data, create charts and dashboards, use some basic alerting features, and more. For more detail, see the complete capabilities table. Core users can access more features than basic users but less than full platform users. They have access to some powerful developer-centric features like New Relic CodeStream, our logs UI, and the ability to build New Relic One apps. For more detail, see the complete capabilities table. Full platform users can access everything, including our more curated observability UI experiences, such as APM, infrastructure monitoring, browser monitoring, mobile monitoring, synthetic monitors, and more. For more details, see the complete capabilities table. Remember that the user type governs a user's theoretical access. In practice, users can have roles assigned that restrict their access further. Detailed capability comparison table Here's a detailed comparison of capabilities by user type. For tips on why you'd make a user one user type or another, see Decide on user type. What you get Basic user Core user (see availability) Full platform user Unlimited queries Custom charts and dashboards Report data from any source (agents, integrations, APIs) Alerts and applied intelligence1 Get notifications & resolve incidents Get notifications & resolve incidents Log management1 Search/view only New Relic Instant Observability (I/O) Install only Install only New Relic Explorer with Lookout and Navigator List view only NerdGraph (our GraphQL API) (for permitted features) New Relic Codestream integration Errors tracking with errors inbox New Relic One custom apps Application performance monitoring (APM) Kubernetes monitoring and Pixie Infrastructure monitoring Distributed tracing Infinite Tracing (Pro & Enterprise editions only) Network performance monitoring (NPM) Browser and mobile monitoring Synthetic monitoring Serverless monitoring Entity maps and service maps Workloads Manage data and manage other users 1Here are more details about how user type access works for some specific features: Applied intelligence capabilities Here are more details about how user type impacts access to applied intelligence features: What you get Basic user Core user (see availability) Full platform user Acknowledge and resolve issues Root cause analysis Incident/anomaly analysis Correlation assistant Issue maps Machine learning classification Log management capabilities Important On January 12, 2022, basic users had some log management capabilities removed. Their current capabilities are described below. Log-related capabilities: Basic users can view and search the log management UI. Core users and full platform users have access to all log management UI features and configuration capabilities, and can see logs in context for the UI experiences they have access to (for example, core users can see applicable log data in the errors inbox UI). How to pick a user type For New Relic One pricing, user type is a factor in billing. Before deciding on user types, you'll want to ensure you understand user-related calculations and downgrade rules. 
For an in-depth capability comparison, see the Capability table. Here are some tips for deciding on a user type: Reasons to make someone a full platform user: They play a key role in the development, testing, deployment, and maintenance phases of the application development lifecycle. They break/fix code regularly; they are responsible for triaging workflows, troubleshooting, or managing users and roles for their team. They have DevOps practices (for example, version control systems, and implementation of CI/CD). They need to use New Relic's curated dashboards and experiences (not just the ability to create their own custom queries and charts); in other words, they need full access to our platform. They need to be able to manage users and/or billing. Reasons to make someone a core user: They're developers who aren't tasked with reliability and uptime as their primary responsibility. They don’t require full platform access, but would benefit from some of the specific functionalities offered to core users, like: Debugging code issues directly from their IDE, using New Relic CodeStream. Viewing errors in one place from across your stack, using Errors Inbox. Using our logs management UI to spot problems and patterns in logs. The ability to build and deploy New Relic One apps, and use apps from the app catalog. Reasons to make someone a basic user: They play a key role in the planning phase of the application development lifecycle. They use and configure New Relic tools to get data into New Relic, and access, configure, and use alerts on such data (not necessarily responsible for triaging workflows, troubleshooting, or managing users and roles for their team). They want to see high-level analytics and business metrics for future planning (such as C-Suite executives). They don't need to use our curated experiences and dashboards, but would benefit from the ability to create their own custom queries and charts of data; in other words, they don't need full access to the platform. They don't manage users or billing. Manage user type and upgrade requests How you manage a user's user type depends on which user model your organization's users are on: New Relic One user model docs Original user model docs For rules around billing and downgrading users, see Billing and downgrade rules. Change to \"full user\" language In November of 2021, in the product and public docs, the \"full user\" user type name was changed to \"full platform user.\" For organizations that have the version of New Relic One pricing without core users: \"Full platform users\" are equivalent to what are referred to as \"full users\" and \"monthly provisioned users\" in your New Relic agreement. For more about agreement-level language, see Billable user agreement terminology. When querying your full platform user counts, you use FullUsersBillable and not FullPlatformUsersBillable. Lacking access to something? 
For questions related to lack of access to New Relic accounts or features, see Factors affecting access.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 2.210215, + "_score": 2.093243, "_version": null, "_explanation": null, "sort": null, - "id": "6043f38a28ccbc97e62c6090", + "id": "61e2dd0a64441fa32500e1f5", "highlight": {} }, { "sections": [ - "NerdGraph tutorial: Configure Infinite Tracing", - "Tip", - "Update the random sampler on a trace observer", - "Update the data sources on a trace observer", - "Update the name on a trace observer" + "Billable user calculation and user downgrade rules", + "View user count in the UI", + "Pricing versions (with and without core users)", + "User calculation for the primary pricing version (with core users)", + "Important", + "User downgrade rules for primary pricing version (with core users)", + "Pay-as-you-go: downgrade rules", + "Annual pool of funds (longer term contract): downgrade rules", + "Rules for New Relic One pricing without core users", + "User downgrade rules" ], - "title": "NerdGraph tutorial: Configure Infinite Tracing", + "title": "Billable user calculation and user downgrade rules", "type": "docs", "tags": [ - "Examples", - "NerdGraph", - "APIs" + "New Relic One pricing and billing", + "Accounts and billing", + "Accounts" ], - "external_id": "71c1abfe8b14acdf316d0845adbdf56a1d9ae895", + "external_id": "b173a862fbec8fef9d39427ae690c16c7cee3c14", "image": "", - "url": "https://docs.newrelic.com/docs/apis/nerdgraph/examples/configure-infinite-tracing-graphql/", - "published_at": "2022-02-14T05:44:25Z", - "updated_at": "2022-02-14T05:44:25Z", + "url": "https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-pricing-billing/user-count-billing/", + "published_at": "2022-02-14T06:14:45Z", + "updated_at": "2022-02-14T06:14:45Z", "document_type": "page", "popularity": 1, - "body": "You can configure many of the settings for Infinite Tracing with the New Relic Edge app. You can also perform a variety of these configuration tasks using GraphQL. With our NerdGraph GraphiQL explorer you can execute and see the results of queries and mutations for Infinite Tracing configuration. This document explains some of the options that are available. Tip If you need help getting started with GraphQL, check out Introduction to New Relic NerdGraph. Update the random sampler on a trace observer As described in our docs on the tail-based sampling algorithms there are several ways Infinite Tracing chooses to sample a trace. The random sampler is configurable, allowing you to control the percent of traces kept. Tip If you need help about when it's appropriate to change the random filter, see Infinite Tracing: Random trace filter. The following example shows you how to update the value from the default of 1%: Go to the NerdGraph GraphiQL explorer at api.newrelic.com/graphiql. Execute the following query to find the trace observer that contains the random sampler to modify: { actor { account(id: YOUR_ACCOUNT_ID) { edge { tracing { traceObservers { id name providerRegion status traceFilters { randomTraceFilter { percentKept } } endpoints { agent { host } } } } } } } } Copy In the response, find the trace observer id. 
Here is an example where the value is 123456789: { \"data\": { \"actor\": { \"account\": { \"edge\": { \"tracing\": { \"traceObservers\": [ { \"endpoints\": [ { \"agent\": { \"host\": \"your-uuid-goes-here.aws-us-east-1.tracing.edge.nr-data.net\" } } ], \"id\": 123456789, \"name\": \"Production Workload, US-EAST-1\", \"providerRegion\": \"AWS_US_EAST_1\", \"status\": \"CREATED\", \"traceFilters\": { \"randomTraceFilter\": { \"percentKept\": 1 } } } ] } } } } } } Copy Execute a mutation using the id and the percent you want to keep. For example, to raise the value to 50% for the trace observer with id 123456789 that we found in the previous step, run the following: mutation { edgeUpdateTraceObservers( accountId: YOUR_ACCOUNT_ID traceObserverConfigs: { id: 123456789, randomTraceFilterConfig: { percentKept: 50 } } ) { responses { errors { message type } traceObserver { traceFilters { randomTraceFilter { percentKept } } id endpoints { agent { host } } name status } } } } Copy Here's the response confirming the change: { \"data\":{ \"edgeUpdateTraceObservers\":{ \"responses\":[ { \"errors\":null, \"traceObserver\":{ \"endpoints\":[ { \"agent\":{ \"host\":\"your-uuid-goes-here.aws-us-east-1.tracing.edge.nr-data.net\" } } ], \"id\":123456789, \"name\":\"Production Workload, US-EAST-1\", \"status\":\"CREATED\", \"traceFilters\":{ \"randomTraceFilter\":{ \"percentKept\":50 } } } } ] } } } Copy Update the data sources on a trace observer While some entities can be configured to send their tracing data directly to an Infinite Tracing trace observer, others can have their data routed by New Relic to a trace observer. Currently, we support this feature for mobile, browser, and Lambda entities. In the context of Infinite Tracing, we call these entities data sources. Tip If you need help about data sources in the trace observer, see Set up the trace observer. There are three ways you can update the data sources on a trace observer: ADD: If you'd like to add new data sources to the existing ones on a trace observer, use the ADD DataSourceGroupUpdateType. No data sources will be removed with this mutation. REPLACE: If you'd like to replace your existing data sources on a trace observer with a new set of values, use the REPLACE DataSourceGroupUpdateType. Any existing values not in the new set will have their status set to INACTIVE. REMOVE: If you'd like to remove one or more of your existing data sources on a trace observer, use the REMOVE DataSourceGroupUpdateType. Any values passed will have their status set to INACTIVE. The following example shows you how to add to the existing data sources: Go to the NerdGraph GraphiQL explorer at api.newrelic.com/graphiql. Execute the following query to find the trace observer that contains the data source group you would like to modify: { actor { account(id: YOUR_ACCOUNT_ID) { edge { tracing { traceObservers { id name dataSourceGroup { dataSources { entity { guid name entityType } status } } endpoints { agent { host } } } } } } } } Copy In the response, find the trace observer id. Here is an example with two trace observers. 
Let's say you want the one in US-EAST-1 called \"Production services\", whose id value is 259: { \"data\": { \"actor\": { \"account\": { \"edge\": { \"tracing\": { \"traceObservers\": [ { \"dataSourceGroup\": { \"dataSources\": [ { \"entity\": { \"entityType\": \"BROWSER_APPLICATION_ENTITY\", \"guid\": \"MXxCUk9XU0VSfEFQUExJQ0FUSU9OfDE\", \"name\": \"My 1st Browser App\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"entityType\": \"MOBILE_APPLICATION_ENTITY\", \"guid\": \"MXxNT0JJTEV8QVBQTElDQVRJT058Mg\", \"name\": \"My 1st iOS App\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"entityType\": \"INFRASTRUCTURE_AWS_LAMBDA_FUNCTION_ENTITY\", \"guid\": \"MXxJTkZSQXxOQXwz\", \"name\": \"My 1st Lambda Function\" }, \"status\": \"ACTIVE\" } ] }, \"endpoints\": [ { \"agent\": { \"host\": \"1541eb3c-9cb7-44fc-a764-fa2ab5d507ed.aws-us-east-1.tracing.edge.nr-data.net\" } } ], \"id\": 259, \"name\": \"Production services\" }, { \"dataSourceGroup\": { \"dataSources\": [ { \"entity\": { \"entityType\": \"BROWSER_APPLICATION_ENTITY\", \"guid\": \"MXxCUk9XU0VSfEFQUExJQ0FUSU9OfDEw\", \"name\": \"My Staging Browser App\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"entityType\": \"MOBILE_APPLICATION_ENTITY\", \"guid\": \"MXxNT0JJTEV8QVBQTElDQVRJT058MTE\", \"name\": \"My Staging Android App\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"entityType\": \"INFRASTRUCTURE_AWS_LAMBDA_FUNCTION_ENTITY\", \"guid\": \"MXxJTkZSQXxOQXwxMg\", \"name\": \"My Staging Lambda Function\" }, \"status\": \"ACTIVE\" } ] }, \"endpoints\": [ { \"agent\": { \"host\": \"cd8816a3-b2a2-421b-aa15-5095a056d902.aws-us-west-2.tracing.edge.nr-data.net\" } } ], \"id\": 632, \"name\": \"Staging Environment\" } ] } } } } } } Copy Execute a mutation using the ID, one or more entity guids, and whether you would like to ADD, REPLACE, or REMOVE them. For example, to add a new browser app and mobile app to the Production services trace observer with id 259, which we found in the previous step, run the following: mutation { edgeUpdateTraceObservers(traceObserverConfigs: {id: 259, dataSourceGroupConfig: {updateType: ADD, guids: [\"MXxCUk9XU0VSfEFQUExJQ0FUSU9OfDQ5\", \"MXxNT0JJTEV8QVBQTElDQVRJT058MTY\"]}}, accountId: 1) { responses { errors { message type } traceObserver { dataSourceGroup { dataSources { entity { guid name } status } } } } } } Copy Here's the response confirming the change: { \"data\": { \"edgeUpdateTraceObservers\": { \"responses\": [ { \"errors\": null, \"traceObserver\": { \"dataSourceGroup\": { \"dataSources\": [ { \"entity\": { \"guid\": \"MXxCUk9XU0VSfEFQUExJQ0FUSU9OfDE\", \"name\": \"My 1st Browser App\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"guid\": \"MXxNT0JJTEV8QVBQTElDQVRJT058Mg\", \"name\": \"My 1st iOS App\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"guid\": \"MXxJTkZSQXxOQXwz\", \"name\": \"My 1st Lambda Function\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"guid\": \"MXxCUk9XU0VSfEFQUExJQ0FUSU9OfDQ5\", \"name\": \"My 2nd Browser App\" }, \"status\": \"ACTIVE\" }, { \"entity\": { \"guid\": \"MXxNT0JJTEV8QVBQTElDQVRJT058MTY\", \"name\": \"My 2nd Mobile App\" }, \"status\": \"ACTIVE\" } ] } } } ] } } } Copy Update the name on a trace observer Here's how you can change the name of a trace observer: Go to the NerdGraph GraphiQL explorer at api.newrelic.com/graphiql. 
Find the trace observer whose name you'd like to update: { actor { account(id: YOUR_ACCOUNT_ID) { edge { tracing { traceObservers { id name providerRegion status endpoints { agent { host } } } } } } } } Copy In the response, find the trace observer id that is returned: { \"data\": { \"actor\": { \"account\": { \"edge\": { \"tracing\": { \"traceObservers\": [ { \"endpoints\": [ { \"agent\": { \"host\": \"your-uuid-goes-here.aws-us-east-1.tracing.edge.nr-data.net\" } } ], \"id\": 123456789, \"name\": \"Production Workload, US-EAST-1\", \"providerRegion\": \"AWS_US_EAST_1\", \"status\": \"CREATED\" } ] } } } } } } Copy Execute a mutation that includes the new value for the name. For example, to change the name to Global Workload, US-EAST-1 for the trace observer with id 123456789, run the following: mutation { edgeUpdateTraceObservers( accountId: YOUR_ACCOUNT_ID traceObserverConfigs: { id: 123456789, name: \"Global Workload, US-EAST-1\" } ) { responses { errors { message type } traceObserver { id endpoints { agent { host } } name status } } } } Copy Here's the response confirming the change: { \"data\":{ \"edgeUpdateTraceObservers\":{ \"responses\":[ { \"errors\":null, \"traceObserver\":{ \"endpoints\":[ { \"agent\":{ \"host\":\"your-uuid-goes-here.aws-us-east-1.tracing.edge.nr-data.net\" } } ], \"id\":123456789, \"name\":\"Global Workload, US-EAST-1\", \"status\":\"CREATED\" } } ] } } } Copy", + "body": "For New Relic One pricing, the count of billable users is a billing factor. View user count in the UI For how to view your user count in the UI, see Billing-related UI. Pricing versions (with and without core users) We have two versions of the New Relic One pricing model: Our primary New Relic One pricing version, also known as \"New Relic One pricing with core users.\" It was released January 12, 2022. This version gives you access to a third user type: the core user. It also has different billing calculations and user downgrade rules. The \"New Relic One without core users\" version. If your New Relic organization existed prior to January 12, 2022, and hasn't switched to the newer version, this is your version. Organizations on this version can add only basic users and full platform users; they don't have access to core users. This version will be increasingly deprecated over time as customers on this version switch to the core user version. See the rules for this version. Tips for determining which version you're on: If your organization has core users or has the ability to add core users, you're on our primary version. If you can only add basic users and full platform users, you're on the \"New Relic One without core users\" version. You can view your users by going to the User management UI. Another way to determine your version is by going to the Manage your plan UI. If you see \"New Relic One - Users\", that means you're on the \"New Relic One without core users\" version. For more about the differences between these versions, see Core users release. User calculation for the primary pricing version (with core users) Important These rules apply for organizations on our primary New Relic One pricing version (the version with core users). You can use the usage UI to get an overview of your billable user count. If you need more detail than the UI provides, you can also run usage-related NRQL queries. To determine an organization's count of billable users in a calendar month, we count the users during that month who had a billable user type of either full platform user or core user. 
A user’s billable user type is defined as the highest user type at which a user was set during a calendar month. We use UTC timezone to define the start and end of a calendar month. For an example of how this works in practice: If a user is set as a full platform user at any point during a calendar month, their billable user type for that month is \"full platform user,\" and won't change, even if they downgrade later that month. This is the case even if that user is changed to a full platform user only briefly. If you're planning on adding billable users or changing your users' user type, you should keep these rules in mind. Some tips: If you want to add a billable user or upgrade a user, you might choose to do that at the beginning of the month. If you want to downgrade a user, you might choose to do that at the end of the month. The count of your unique users is determined by email address. If there are multiple user records in an organization that have the same email address, for billing purposes those user records would count as a single user, and that user's billable user type would be their highest user type assigned during that month. The cost of your billable users depends on an organization’s pricing edition (Standard, Pro, or Enterprise) or on any custom deals you have with New Relic. When a New Relic organization first starts being billed, their billable user count is pro-rated based on when during the month they started. If an organization cancels their subscription, prorating is also applied for their last month. User downgrade rules for primary pricing version (with core users) Important This section applies only for organizations on our primary New Relic One pricing version (the version with core users). The rules pertaining to how many times you can downgrade full platform users differ depending on your usage plan: Pay-as-you-go: downgrade rules For the pay-as-you-go usage plan, there are no rules limiting the downgrading of users, but the billing impacts may affect when you decide to upgrade or downgrade users. Annual pool of funds (longer term contract): downgrade rules Before upgrading or downgrading a user, ensure you understand the billing impacts. For the annual pool of funds plan, we have rules regarding how many times a user can be downgraded from being a full platform user to a lower user type. (There are no rules regarding how often users change between core user and basic user.) During a New Relic organization's contract year (defined below), if a full platform user is changed to a lower user type and back to a full platform user twice, that user will be billed as a full platform user for the remainder of that contract year, regardless of user type adjustments. A contract year is defined as a year starting at your contract's starting point, or at the anniversary of that point. If your organization started out on a different pricing plan and switched to this version of pricing, the user type downgrade rule will apply from when you opted in until the renewal of your subscription term or, if applicable, the annual anniversary date of your commitment term, whichever is earlier. Rules for New Relic One pricing without core users The following rules apply only for organizations on the version of New Relic One pricing without core users: How billable users are determined. For a calendar month, an organization is billed based on a calculation of the number of full platform users for that month. Prorating in first and last month. 
The count of billable users is prorated based on when a New Relic organization starts their subscription, or based on when a user becomes a full platform user (added as a full platform user or converted to one). Users are billable when provisioned. A user counts as billable the moment they are set to a billable user type in New Relic. This applies regardless of whether that user has ever logged into or used New Relic. User count based on email address. If there are multiple user records in an organization that have the same email address, for billing purposes that would count as a single user. Caveat for our original user model. If your organization has users on our original user model: If a user is set as a basic user in one account in the organization and as a billable user in another account, the billable user status takes precedence. For user downgrade rules, see below: User downgrade rules The following user downgrade rules apply for organizations on the version of New Relic One pricing without core users: User type is meant to be a fairly long-term setting based on a user’s expected New Relic duties and responsibilities. For that reason, a full platform user may only be downgraded a maximum of two times in a 12-month period. If a user’s user type changes more than this allowed number of times, New Relic can charge that user as a full platform user.", "info": "", "_index": "520d1d5d14cc8a32e600034b", "_type": "520d1d5d14cc8a32e600034c", - "_score": 2.2091117, + "_score": 2.0915246, "_version": null, "_explanation": null, "sort": null, - "id": "6043fcb7e7b9d218485799d7", + "id": "61e2e3eb196a678b9a02b837", "highlight": {} } ]
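The prorating rule in the added body (billable user count "pro-rated based on when during the month they started") is easiest to see with numbers. A worked example with invented figures, assuming day-level proration (the text above doesn't specify the granularity):

    10 full platform users provisioned on day 16 of a 30-day month
    billable full platform users for that month = 10 × (15 / 30) = 5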
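The same body also says you "can also run usage-related NRQL queries," and the user-type body earlier in this file notes that organizations without core users query FullUsersBillable rather than FullPlatformUsersBillable. A minimal NRQL sketch of such a usage query; the two attribute names come from the doc text itself, while the NrMTDConsumption event type is an assumption not confirmed anywhere in this file:

    FROM NrMTDConsumption SELECT latest(FullUsersBillable) SINCE 1 month ago

On the primary pricing version (with core users), the same sketch would presumably use latest(FullPlatformUsersBillable) instead, per the naming note in the user-type body.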