diff --git a/build.gradle b/build.gradle
index ea0ce323..a1d4b4bb 100644
--- a/build.gradle
+++ b/build.gradle
@@ -21,6 +21,7 @@ buildscript {
// This isn't applying from repositories.gradle so repeating it here
repositories {
mavenCentral()
+ mavenLocal()
}
dependencies {
@@ -92,6 +93,7 @@ project.afterEvaluate {
repositories {
mavenCentral()
+ mavenLocal()
}
@@ -114,13 +116,12 @@ dependencies {
all*.exclude group: "org.elasticsearch", module: "securemock"
}
- compile 'org.jooq:jooq:3.10.8'
- compile 'org.bouncycastle:bcprov-jdk15on:1.60'
- compile 'org.bouncycastle:bcpkix-jdk15on:1.60'
- compile 'org.xerial:sqlite-jdbc:3.8.11.2'
compile 'com.google.guava:guava:27.0.1-jre'
+ compile 'org.jooq:jooq:3.10.8'
+ compile 'com.amazon.opendistro.elasticsearch:performanceanalyzer:1.0'
compile 'com.fasterxml.jackson.core:jackson-annotations:2.8.11'
compile 'com.fasterxml.jackson.core:jackson-databind:2.8.11'
// JDK9+ has to run powermock 2+. https://github.com/powermock/powermock/issues/888
testCompile group: 'org.powermock', name: 'powermock-api-mockito2', version: '2.0.0'
diff --git a/config/opendistro_performance_analyzer/log4j2.properties b/config/opendistro_performance_analyzer/log4j2.properties
deleted file mode 100644
index fa53dde2..00000000
--- a/config/opendistro_performance_analyzer/log4j2.properties
+++ /dev/null
@@ -1,14 +0,0 @@
-appender.stats_log_rolling.type = RollingFile
-appender.stats_log_rolling.name = stats_log_rolling
-appender.stats_log_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}performance_analyzer_plugin_stats.log
-appender.stats_log_rolling.layout.type = PatternLayout
-appender.stats_log_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}performance_analyzer_plugin_stats-%d{yyyy-MM-dd}.log
-appender.stats_log_rolling.policies.type = Policies
-appender.stats_log_rolling.policies.time.type = TimeBasedTriggeringPolicy
-appender.stats_log_rolling.policies.time.interval = 1
-appender.stats_log_rolling.policies.time.modulate = true
-
-logger.stats_log.name = stats_log
-logger.stats_log.level = info
-logger.stats_log.appenderRef.stats_log_rolling.ref = stats_log_rolling
-logger.stats_log.additivity = false
diff --git a/licenses/bcpkix-jdk15on-1.60.jar.sha1 b/licenses/bcpkix-jdk15on-1.60.jar.sha1
deleted file mode 100644
index 2217a947..00000000
--- a/licenses/bcpkix-jdk15on-1.60.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d0c46320fbc07be3a24eb13a56cee4e3d38e0c75
\ No newline at end of file
diff --git a/licenses/bcpkix-jdk15on-LICENSE.txt b/licenses/bcpkix-jdk15on-LICENSE.txt
deleted file mode 100644
index b6a4e728..00000000
--- a/licenses/bcpkix-jdk15on-LICENSE.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Please note this should be read in the same way as the MIT license.
-
-Please also note this licensing model is made possible through funding from donations and the sale of support contracts.
-
-LICENSE
-Copyright (c) 2000 - 2019 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/licenses/bcprov-jdk15on-1.60.jar.sha1 b/licenses/bcprov-jdk15on-1.60.jar.sha1
deleted file mode 100644
index e0604c45..00000000
--- a/licenses/bcprov-jdk15on-1.60.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bd47ad3bd14b8e82595c7adaa143501e60842a84
\ No newline at end of file
diff --git a/licenses/bcprov-jdk15on-LICENSE.txt b/licenses/bcprov-jdk15on-LICENSE.txt
deleted file mode 100644
index b6a4e728..00000000
--- a/licenses/bcprov-jdk15on-LICENSE.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Please note this should be read in the same way as the MIT license.
-
-Please also note this licensing model is made possible through funding from donations and the sale of support contracts.
-
-LICENSE
-Copyright (c) 2000 - 2019 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/licenses/bcprov-jdk15on-NOTICE.txt b/licenses/bcprov-jdk15on-NOTICE.txt
deleted file mode 100644
index e69de29b..00000000
diff --git a/licenses/performanceanalyzer-1.0.jar.sha1 b/licenses/performanceanalyzer-1.0.jar.sha1
new file mode 100644
index 00000000..e6035317
--- /dev/null
+++ b/licenses/performanceanalyzer-1.0.jar.sha1
@@ -0,0 +1 @@
+c1334d0a10bde9be7a31fd633afaa22bdfa9ed71
\ No newline at end of file
diff --git a/licenses/performanceanalyzer-LICENSE.txt b/licenses/performanceanalyzer-LICENSE.txt
new file mode 100644
index 00000000..2116d6d9
--- /dev/null
+++ b/licenses/performanceanalyzer-LICENSE.txt
@@ -0,0 +1,12 @@
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
diff --git a/licenses/bcpkix-jdk15on-NOTICE.txt b/licenses/performanceanalyzer-NOTICE.txt
similarity index 100%
rename from licenses/bcpkix-jdk15on-NOTICE.txt
rename to licenses/performanceanalyzer-NOTICE.txt
diff --git a/licenses/sqlite-jdbc-3.8.11.2.jar.sha1 b/licenses/sqlite-jdbc-3.8.11.2.jar.sha1
deleted file mode 100644
index 68c85690..00000000
--- a/licenses/sqlite-jdbc-3.8.11.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dec208cba8debb0a8b9204b08d8d887d63041f0f
\ No newline at end of file
diff --git a/licenses/sqlite-jdbc-LICENSE.txt b/licenses/sqlite-jdbc-LICENSE.txt
deleted file mode 100644
index d6456956..00000000
--- a/licenses/sqlite-jdbc-LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/licenses/sqlite-jdbc-NOTICE.txt b/licenses/sqlite-jdbc-NOTICE.txt
deleted file mode 100644
index e69de29b..00000000
diff --git a/pa_bin/performance-analyzer-agent b/pa_bin/performance-analyzer-agent
index 63892ef6..356ae2fd 100755
--- a/pa_bin/performance-analyzer-agent
+++ b/pa_bin/performance-analyzer-agent
@@ -24,18 +24,18 @@ else
fi
if ! echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; then
- exec $JAVA_HOME/bin/java -Des.path.home=$ES_HOME -Dlog4j.configurationFile=$ES_HOME/plugins/opendistro_performance_analyzer/pa_config/log4j2.xml \
+ exec $JAVA_HOME/bin/java -Des.path.home=$ES_HOME -Dlog4j.configurationFile=$ES_HOME/opendistro_performance_analyzer/pa_config/log4j2.xml \
-DconfigFilePath=$3 \
-Xms64M -Xmx64M -XX:+UseSerialGC -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:InitialCodeCacheSize=4096 \
-XX:InitialBootClassLoaderMetaspaceSize=30720 -XX:MaxRAM=400m \
- -cp $ES_HOME/lib/*:$ES_HOME/plugins/opendistro_performance_analyzer/* com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerApp
+ -cp $ES_HOME/lib/*:$ES_HOME/opendistro_performance_analyzer/* com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerApp
else
echo 'Starting daemon'
- exec $JAVA_HOME/bin/java -Des.path.home=$ES_HOME -Dlog4j.configurationFile=$ES_HOME/plugins/opendistro_performance_analyzer/pa_config/log4j2.xml \
+ exec $JAVA_HOME/bin/java -Des.path.home=$ES_HOME -Dlog4j.configurationFile=$ES_HOME/opendistro_performance_analyzer/pa_config/log4j2.xml \
-DconfigFilePath=$3 \
-Xms64M -Xmx64M -XX:+UseSerialGC -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:InitialCodeCacheSize=4096 \
-XX:InitialBootClassLoaderMetaspaceSize=30720 -XX:MaxRAM=400m \
- -cp $ES_HOME/lib/*:$ES_HOME/plugins/opendistro_performance_analyzer/* com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerApp &
+ -cp $ES_HOME/lib/*:$ES_HOME/opendistro_performance_analyzer/* com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerApp &
pid=$!
PID_LOC=/tmp/performance-analyzer-agent
diff --git a/pa_config/log4j2.xml b/pa_config/log4j2.xml
index 692d595f..31386c9b 100644
--- a/pa_config/log4j2.xml
+++ b/pa_config/log4j2.xml
@@ -4,7 +4,7 @@
-
+
@@ -16,7 +16,7 @@
-
+
diff --git a/pa_config/supervisord.conf b/pa_config/supervisord.conf
index 65792c10..8a7b4eda 100644
--- a/pa_config/supervisord.conf
+++ b/pa_config/supervisord.conf
@@ -28,5 +28,5 @@ serverurl=/usr/share/supervisord.sock
files = /etc/supervisor/conf.d/*.conf
[program:performance_analyzer]
-command=/usr/share/elasticsearch/plugins/opendistro_performance_analyzer/pa_bin/performance-analyzer-agent /usr/share/elasticsearch
+command=/usr/share/elasticsearch/performance-analyzer-rca-1.0.0-SNAPSHOT/bin/performance-analyzer-rca /usr/share/elasticsearch
user=1000
diff --git a/release-notes b/release-notes
index 185b86eb..f837d38c 100644
--- a/release-notes
+++ b/release-notes
@@ -1,3 +1,4 @@
+Backporting fixes from master
## Version 1.2.0 (Version compatible with elasticsearch 7.2.0)
## New Features
This is the release of the Open Distro Performance Analyzer that will work with elasticsearch 7.2.0
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/CertificateUtils.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/CertificateUtils.java
deleted file mode 100644
index 1e9bf919..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/CertificateUtils.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package com.amazon.opendistro.elasticsearch.performanceanalyzer;
-
-import java.io.FileReader;
-import java.security.KeyStore;
-import java.security.PrivateKey;
-import java.security.cert.Certificate;
-
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.LogManager;
-import org.bouncycastle.asn1.pkcs.PrivateKeyInfo;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
-import org.bouncycastle.jce.provider.BouncyCastleProvider;
-import org.bouncycastle.openssl.PEMParser;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-
-public class CertificateUtils {
-
- public static final String ALIAS_PRIVATE = "private";
- public static final String ALIAS_CERT = "cert";
- //The password is not used to encrypt keys on disk.
- public static final String IN_MEMORY_PWD = "opendistro";
- private static final String CERTIFICATE_FILE_PATH = "certificate-file-path";
- private static final String PRIVATE_KEY_FILE_PATH = "private-key-file-path";
- private static final Logger LOGGER = LogManager.getLogger(CertificateUtils.class);
-
- public static Certificate getCertificate(final FileReader certReader) throws Exception {
- try (PEMParser pemParser = new PEMParser(certReader)) {
- X509CertificateHolder certificateHolder = (X509CertificateHolder) pemParser.readObject();
- Certificate caCertificate = new JcaX509CertificateConverter()
- .setProvider("BC")
- .getCertificate(certificateHolder);
- return caCertificate;
- }
- }
-
- public static PrivateKey getPrivateKey(final FileReader keyReader) throws Exception {
- try (PEMParser pemParser = new PEMParser(keyReader)) {
- PrivateKeyInfo pki = (PrivateKeyInfo) pemParser.readObject();
- return BouncyCastleProvider.getPrivateKey(pki);
- }
- }
-
- public static KeyStore createKeyStore() throws Exception {
- String certFilePath = PluginSettings.instance().getSettingValue(CERTIFICATE_FILE_PATH);
- String keyFilePath = PluginSettings.instance().getSettingValue(PRIVATE_KEY_FILE_PATH);
- PrivateKey pk = getPrivateKey(new FileReader(keyFilePath));
- KeyStore ks = createEmptyStore();
- Certificate certificate = getCertificate(new FileReader(certFilePath));
- ks.setCertificateEntry(ALIAS_CERT, certificate);
- ks.setKeyEntry(ALIAS_PRIVATE, pk, IN_MEMORY_PWD.toCharArray(), new Certificate[]{certificate});
- return ks;
- }
-
- public static KeyStore createEmptyStore() throws Exception {
- KeyStore ks = KeyStore.getInstance("JKS");
- ks.load(null, IN_MEMORY_PWD.toCharArray());
- return ks;
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/ConfigStatus.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/ConfigStatus.java
deleted file mode 100644
index b92f957e..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/ConfigStatus.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer;
-
-public final class ConfigStatus {
- private boolean configMissingOrIncorrect = false;
- public static final ConfigStatus INSTANCE = new ConfigStatus();
-
- private ConfigStatus() {
- }
-
- public boolean haveValidConfig() {
- return !configMissingOrIncorrect;
- }
-
- public void setConfigurationInvalid() {
- configMissingOrIncorrect = true;
- }
-
-
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/DBUtils.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/DBUtils.java
deleted file mode 100644
index 5ef903d3..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/DBUtils.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.jooq.Condition;
-import org.jooq.DSLContext;
-import org.jooq.Field;
-import org.jooq.Record;
-import org.jooq.Result;
-import org.jooq.SelectHavingStep;
-import org.jooq.TableLike;
-import org.jooq.impl.DSL;
-
-@SuppressWarnings("unchecked")
-public class DBUtils {
- public static boolean checkIfTableExists(DSLContext create, String tableName) {
- Result<Record> res = create.select()
- .from(DSL.table("sqlite_master"))
- .where(DSL.field("type").eq("table").and(
- DSL.field("name").eq(tableName)))
- .fetch();
- return (res.size() > 0);
- }
-
- public static Result<Record> fetchTables(DSLContext create) {
- return create.select()
- .from(DSL.table("sqlite_master"))
- .where(DSL.field("type").eq("table"))
- .fetch();
- }
-
- public static List<Field<String>> getStringFieldsFromList(Collection<String> fieldNames) {
- return fieldNames.stream()
- .map(s -> DSL.field(DSL.name(s), String.class))
- .collect(Collectors.toList());
- }
-
- /**
- * Same implementation as getStringFieldsFromList, but return a list
- * allowing other kinds of fields other than String field.
- *
- * @param fieldNames a collection of field names
- *
- * @return a list of org.jooq.Field objects
- *
- */
- public static List<Field<?>> getFieldsFromList(
- Collection<String> fieldNames) {
- return fieldNames.stream()
- .map(s -> DSL.field(DSL.name(s), String.class))
- .collect(Collectors.toList());
- }
-
- public static List<Field<Double>> getDoubleFieldsFromList(Collection<String> fieldNames) {
- return fieldNames.stream()
- .map(s -> DSL.field(DSL.name(s), Double.class))
- .collect(Collectors.toList());
- }
-
- public static List<Field<String>> getStringFieldsFromTable(Collection<String> fieldNames,
- TableLike<Record> table) {
- return fieldNames.stream()
- .map(s -> table.field(s, String.class))
- .collect(Collectors.toList());
- }
-
- public static List<Field<?>> getSelectFieldsForMetricName(String metricName, List<String> metrics, List<String> dimensions) {
- List<Field<?>> selectFields = DBUtils.getFieldsFromList(dimensions);
- for (String metric : metrics) {
- if (metric.equals(metricName)) {
- selectFields.add(DSL.field(metric, Double.class).as(metric));
- } else {
- selectFields.add(DSL.val(null, Double.class).as(metric));
- }
- }
- return selectFields;
- }
-
- /**
- * Get records by field and return as a set.
- * @param table table select
- * @param field field
- * @param condition select condition
- * @param create db connection
- * @return records set
- */
- public static Set<String> getRecordSetByField(SelectHavingStep<Record> table, Field<String> field, Condition condition,
- final DSLContext create) {
- Result records = create.select(field)
- .from(table)
- .where(condition)
- .fetch();
-
- Set<String> res = new HashSet<>();
- for (int i = 0; i < records.size(); i++) {
- res.add(records.get(i).get(0).toString());
- }
- return res;
- }
-
- public static String getAggFieldName(String fieldName, String aggName) {
- return aggName + "_" + fieldName;
- }
-
- public static Map<String, Field<Double>> getDoubleFieldMapFromList(
- Collection<String> fieldNames) {
- return fieldNames.stream().collect(Collectors.toMap(s -> s,
- s -> DSL.field(DSL.name(s), Double.class)));
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/OSMetricsGeneratorFactory.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/OSMetricsGeneratorFactory.java
deleted file mode 100644
index 378ef96d..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/OSMetricsGeneratorFactory.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.OSMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux.LinuxOSMetricsGenerator;
-
-public class OSMetricsGeneratorFactory {
-
- private static final String OS_TYPE = System.getProperty("os.name");
-
- public static OSMetricsGenerator getInstance() {
-
- if (isLinux()) {
- return LinuxOSMetricsGenerator.getInstance();
- } else {
- ConfigStatus.INSTANCE.setConfigurationInvalid();
- }
-
- return null;
- }
-
- private static boolean isLinux() {
- return OS_TYPE.toLowerCase().contains("linux");
- }
-
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/PerformanceAnalyzerApp.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/PerformanceAnalyzerApp.java
deleted file mode 100644
index c9221253..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/PerformanceAnalyzerApp.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer;
-
-import java.io.File;
-
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.security.KeyStore;
-import java.util.concurrent.Executors;
-
-
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.TrustManager;
-import java.security.cert.X509Certificate;
-import java.security.Security;
-import javax.net.ssl.X509TrustManager;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.SSLSession;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.TroubleshootingConfig;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.reader.ReaderMetricsProcessor;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.rest.QueryMetricsRequestHandler;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import org.bouncycastle.jce.provider.BouncyCastleProvider;
-
-import com.sun.net.httpserver.HttpServer;
-import com.sun.net.httpserver.HttpsServer;
-import com.sun.net.httpserver.HttpsConfigurator;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.ScheduledMetricCollectorsExecutor;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-
-public class PerformanceAnalyzerApp {
- private static final int WEBSERVICE_DEFAULT_PORT = 9600;
- private static final String WEBSERVICE_PORT_CONF_NAME = "webservice-listener-port";
- private static final String WEBSERVICE_BIND_HOST_NAME = "webservice-bind-host";
- //Use system default for max backlog.
- private static final int INCOMING_QUEUE_LENGTH = 1;
- public static final String QUERY_URL = "/_opendistro/_performanceanalyzer/metrics";
- private static final Logger LOG = LogManager.getLogger(PerformanceAnalyzerApp.class);
- private static final ScheduledMetricCollectorsExecutor METRIC_COLLECTOR_EXECUTOR = new ScheduledMetricCollectorsExecutor(1, false);
-
- public static void main(String[] args) throws Exception {
- ESResources.INSTANCE.setPluginFileLocation(System.getProperty("es.path.home")
- + File.separator + "plugins" + File.separator + PerformanceAnalyzerPlugin.PLUGIN_NAME + File.separator);
-
- //Initialize settings before creating threads.
- PluginSettings settings = PluginSettings.instance();
-
- StatsCollector.STATS_TYPE = "agent-stats-metadata";
- METRIC_COLLECTOR_EXECUTOR.addScheduledMetricCollector(StatsCollector.instance());
- StatsCollector.instance().addDefaultExceptionCode(StatExceptionCode.READER_RESTART_PROCESSING);
- METRIC_COLLECTOR_EXECUTOR.start();
-
- Thread readerThread = new Thread(new Runnable() {
- public void run() {
- while (true) {
- try {
- ReaderMetricsProcessor mp = new ReaderMetricsProcessor(settings.getMetricsLocation());
- ReaderMetricsProcessor.setCurrentInstance(mp);
- mp.run();
- } catch (Throwable e) {
- if (TroubleshootingConfig.getEnableDevAssert()) {
- break;
- }
- LOG.error("Error in ReaderMetricsProcessor...restarting, ExceptionCode: {}",
- StatExceptionCode.READER_RESTART_PROCESSING.toString());
- StatsCollector.instance().logException(StatExceptionCode.READER_RESTART_PROCESSING);
- }
- }
- }
- });
- readerThread.start();
-
- int readerPort = getPortNumber();
- try {
- Security.addProvider(new BouncyCastleProvider());
- HttpServer server = null;
- if (settings.getHttpsEnabled()) {
- server = createHttpsServer(readerPort);
- }
- else {
- server = createHttpServer(readerPort);
- }
- server.createContext(QUERY_URL, new QueryMetricsRequestHandler());
- server.setExecutor(Executors.newCachedThreadPool());
- server.start();
- } catch (java.net.BindException ex) {
- LOG.error("Port {} is already in use...exiting", readerPort);
- Runtime.getRuntime().halt(1);
- } catch (Exception ex) {
- LOG.error("Exception in starting Reader Process: " + ex.toString());
- Runtime.getRuntime().halt(1);
- }
- }
-
- private static HttpServer createHttpsServer(int readerPort) throws Exception {
- HttpsServer server = null;
- String bindHost = getBindHost();
- if (bindHost != null && !bindHost.trim().isEmpty()) {
- LOG.info("Binding to Interface: {}", bindHost);
- server = HttpsServer.create(new InetSocketAddress(InetAddress.getByName(bindHost.trim()), readerPort),
- INCOMING_QUEUE_LENGTH);
- } else {
- LOG.info("Value Not Configured for: {} Using default value: binding to all interfaces", WEBSERVICE_BIND_HOST_NAME);
- server = HttpsServer.create(new InetSocketAddress(readerPort), INCOMING_QUEUE_LENGTH);
- }
-
- TrustManager[] trustAllCerts = new TrustManager[] {
- new X509TrustManager() {
-
- public X509Certificate[] getAcceptedIssuers() {
- return null;
- }
- public void checkClientTrusted(X509Certificate[] certs, String authType) {
-
- }
- public void checkServerTrusted(X509Certificate[] certs, String authType) {
-
- }
- }
- };
-
- HostnameVerifier allHostsValid = new HostnameVerifier() {
- public boolean verify(String hostname, SSLSession session) {
- return true;
- }
- };
-
- // Install the all-trusting trust manager
- SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
-
- KeyStore ks = CertificateUtils.createKeyStore();
- KeyManagerFactory kmf = KeyManagerFactory.getInstance("NewSunX509");
- kmf.init(ks, CertificateUtils.IN_MEMORY_PWD.toCharArray());
- sslContext.init(kmf.getKeyManagers(), trustAllCerts, null);
-
- HttpsURLConnection.setDefaultSSLSocketFactory(sslContext.getSocketFactory());
- HttpsURLConnection.setDefaultHostnameVerifier(allHostsValid);
- server.setHttpsConfigurator(new HttpsConfigurator(sslContext));
- return server;
- }
-
- private static HttpServer createHttpServer(int readerPort) throws Exception {
- HttpServer server = null;
- String bindHost = getBindHost();
- if (bindHost != null && !bindHost.trim().isEmpty()) {
- LOG.info("Binding to Interface: {}", bindHost);
- server = HttpServer.create(new InetSocketAddress(InetAddress.getByName(bindHost.trim()), readerPort),
- INCOMING_QUEUE_LENGTH);
- } else {
- LOG.info("Value Not Configured for: {} Using default value: binding to all interfaces", WEBSERVICE_BIND_HOST_NAME);
- server = HttpServer.create(new InetSocketAddress(readerPort), INCOMING_QUEUE_LENGTH);
- }
-
- return server;
- }
-
- private static int getPortNumber() {
- String readerPortValue;
- try {
- readerPortValue = PluginSettings.instance().getSettingValue(WEBSERVICE_PORT_CONF_NAME);
-
- if (readerPortValue == null) {
- LOG.info("{} not configured; using default value: {}", WEBSERVICE_PORT_CONF_NAME, WEBSERVICE_DEFAULT_PORT);
- return WEBSERVICE_DEFAULT_PORT;
- }
-
- return Integer.parseInt(readerPortValue);
- } catch (Exception ex) {
- LOG.error("Invalid Configuration: {} Using default value: {} AND Error: {}",
- WEBSERVICE_PORT_CONF_NAME, WEBSERVICE_DEFAULT_PORT, ex.toString());
- return WEBSERVICE_DEFAULT_PORT;
- }
- }
-
- private static String getBindHost() {
- return PluginSettings.instance().getSettingValue(WEBSERVICE_BIND_HOST_NAME);
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/PerformanceAnalyzerPlugin.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/PerformanceAnalyzerPlugin.java
index aa106117..eb3ad698 100644
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/PerformanceAnalyzerPlugin.java
+++ b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/PerformanceAnalyzerPlugin.java
@@ -15,8 +15,6 @@
package com.amazon.opendistro.elasticsearch.performanceanalyzer;
-import static java.util.Collections.singletonList;
-
import java.io.File;
import java.security.AccessController;
import java.security.PrivilegedAction;
@@ -27,20 +25,6 @@
import java.util.Map;
import java.util.function.Supplier;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.config.PerformanceAnalyzerConfigAction;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.action.PerformanceAnalyzerActionFilter;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.CircuitBreakerCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.HeapMetricsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MasterServiceMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MasterServiceEventMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NetworkE2ECollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NetworkInterfaceCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NodeDetailsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.whoami.TransportWhoAmIAction;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.whoami.WhoAmIAction;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.listener.PerformanceAnalyzerSearchListener;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.transport.PerformanceAnalyzerTransportInterceptor;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
@@ -76,22 +60,43 @@
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.watcher.ResourceWatcherService;
+
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.action.PerformanceAnalyzerActionFilter;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.CircuitBreakerCollector;
import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.DisksCollector;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.HeapMetricsCollector;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MasterServiceEventMetrics;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MasterServiceMetrics;
import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MetricsPurgeActivity;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NodeStatsMetricsCollector;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NetworkInterfaceCollector;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NodeDetailsCollector;
import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.OSMetricsCollector;
import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.ScheduledMetricCollectorsExecutor;
import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.ThreadPoolMetricsCollector;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.config.PerformanceAnalyzerConfigAction;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.whoami.TransportWhoAmIAction;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.whoami.WhoAmIAction;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.listener.PerformanceAnalyzerSearchListener;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.reader_writer_shared.EventLog;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.transport.PerformanceAnalyzerTransportInterceptor;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.reader_writer_shared.EventLogFileHandler;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.util.Utils;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.writer.EventLogQueueProcessor;
+
+import static java.util.Collections.singletonList;
-public class PerformanceAnalyzerPlugin extends Plugin implements ActionPlugin, NetworkPlugin, SearchPlugin {
+public final class PerformanceAnalyzerPlugin extends Plugin implements ActionPlugin, NetworkPlugin, SearchPlugin {
private static final Logger LOG = LogManager.getLogger(PerformanceAnalyzerPlugin.class);
public static final String PLUGIN_NAME = "opendistro_performance_analyzer";
+ public static final int QUEUE_PURGE_INTERVAL_MS = 1000;
private static SecurityManager sm = null;
static {
SecurityManager sm = System.getSecurityManager();
-
+ Utils.configureMetrics();
if(sm != null) {
// unprivileged code such as scripts do not have SpecialPermission
sm.checkPermission(new SpecialPermission());
@@ -131,24 +136,30 @@ public PerformanceAnalyzerPlugin(final Settings settings, final java.nio.file.Pa
ESResources.INSTANCE.setConfigPath(configPath);
ESResources.INSTANCE.setPluginFileLocation(new Environment(settings, configPath).
pluginsFile().toAbsolutePath().toString() + File.separator + PLUGIN_NAME + File.separator);
- //Initialize plugin settings. Accessing plugin settings before this
+ //initialize plugin settings. Accessing plugin settings before this
//point will break, as the plugin location will not be initialized.
PluginSettings.instance();
scheduledMetricCollectorsExecutor = new ScheduledMetricCollectorsExecutor();
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new ThreadPoolMetricsCollector());
- scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new NodeStatsMetricsCollector());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new CircuitBreakerCollector());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new OSMetricsCollector());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new HeapMetricsCollector());
+
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new MetricsPurgeActivity());
+
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new NodeDetailsCollector());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new MasterServiceMetrics());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new MasterServiceEventMetrics());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new DisksCollector());
- scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new NetworkE2ECollector());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(new NetworkInterfaceCollector());
scheduledMetricCollectorsExecutor.addScheduledMetricCollector(StatsCollector.instance());
scheduledMetricCollectorsExecutor.start();
+
+ EventLog eventLog = new EventLog();
+ EventLogFileHandler eventLogFileHandler = new EventLogFileHandler(eventLog, PluginSettings.instance().getMetricsLocation());
+ new EventLogQueueProcessor(eventLogFileHandler,
+ MetricsConfiguration.SAMPLING_INTERVAL,
+ QUEUE_PURGE_INTERVAL_MS).scheduleExecutor();
}
// - http level: bulk, search
@@ -186,7 +197,8 @@ public List<RestHandler> getRestHandlers(final Settings s
final SettingsFilter settingsFilter,
final IndexNameExpressionResolver indexNameExpressionResolver,
final Supplier<DiscoveryNodes> nodesInCluster) {
- PerformanceAnalyzerConfigAction performanceanalyzerConfigAction = new PerformanceAnalyzerConfigAction(settings, restController);
+ PerformanceAnalyzerConfigAction performanceanalyzerConfigAction = new PerformanceAnalyzerConfigAction(settings,
+ restController, scheduledMetricCollectorsExecutor);
PerformanceAnalyzerConfigAction.setInstance(performanceanalyzerConfigAction);
return singletonList(performanceanalyzerConfigAction);
}
@@ -205,10 +217,10 @@ public Collection<Object> createComponents(Client client, ClusterService cluster
@Override
public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool,
- PageCacheRecycler pageCacheRecycler,
- CircuitBreakerService circuitBreakerService,
- NamedWriteableRegistry namedWriteableRegistry,
- NetworkService networkService) {
+ PageCacheRecycler pageCacheRecycler,
+ CircuitBreakerService circuitBreakerService,
+ NamedWriteableRegistry namedWriteableRegistry,
+ NetworkService networkService) {
ESResources.INSTANCE.setSettings(settings);
ESResources.INSTANCE.setCircuitBreakerService(circuitBreakerService);
return Collections.emptyMap();
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/CachedStats.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/CachedStats.java
deleted file mode 100644
index accd1b05..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/CachedStats.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.ShardStatsValue;
-
-class CachedStats {
- private static final Set<String> CACHABLE_VALUES = new HashSet<>(Arrays.asList(
- ShardStatsValue.INDEXING_THROTTLE_TIME.toString(),
- ShardStatsValue.CACHE_QUERY_HIT.toString(),
- ShardStatsValue.CACHE_QUERY_MISS.toString(),
- ShardStatsValue.CACHE_FIELDDATA_EVICTION.toString(),
- ShardStatsValue.CACHE_REQUEST_HIT.toString(),
- ShardStatsValue.CACHE_REQUEST_MISS.toString(),
- ShardStatsValue.CACHE_REQUEST_EVICTION.toString(),
- ShardStatsValue.REFRESH_EVENT.toString(),
- ShardStatsValue.REFRESH_TIME.toString(),
- ShardStatsValue.FLUSH_EVENT.toString(),
- ShardStatsValue.FLUSH_TIME.toString(),
- ShardStatsValue.MERGE_EVENT.toString(),
- ShardStatsValue.MERGE_TIME.toString()
- ));
- private Map<String, Long> cachedValues = new HashMap<>();
-
- long getValue(String statsName) {
- return cachedValues.getOrDefault(statsName, 0L);
- }
-
- void putValue(String statsName, long value) {
- cachedValues.put(statsName, value);
- }
-
- static Set<String> getCachableValues() {
- return CACHABLE_VALUES;
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/DiskMetrics.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/DiskMetrics.java
deleted file mode 100644
index 231da7d6..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/DiskMetrics.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.DiskDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.DiskValue;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class DiskMetrics extends MetricStatus {
- public String name;
-
- public double utilization; // fraction, 0-1
-
- public double await; // ms
-
- public double serviceRate; // MBps
-
- public DiskMetrics(String name, double utilization, double await,
- double serviceRate) {
- super();
- this.name = name;
- this.utilization = utilization;
- this.await = await;
- this.serviceRate = serviceRate;
- }
-
- public DiskMetrics() {
- super();
- }
-
- @JsonProperty(DiskDimension.Constants.NAME_VALUE)
- public String getName() {
- return name;
- }
-
- @JsonProperty(DiskValue.Constants.UTIL_VALUE)
- public double getUtilization() {
- return utilization;
- }
-
- @JsonProperty(DiskValue.Constants.WAIT_VALUE)
- public double getAwait() {
- return await;
- }
-
- @JsonProperty(DiskValue.Constants.SRATE_VALUE)
- public double getServiceRate() {
- return serviceRate;
- }
-}
\ No newline at end of file
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/DisksCollector.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/DisksCollector.java
deleted file mode 100644
index eadbdfa6..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/DisksCollector.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.OSMetricsGeneratorFactory;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsProcessor;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.DiskMetricsGenerator;
-
-public class DisksCollector extends PerformanceAnalyzerMetricsCollector implements MetricsProcessor {
-
- private static final int sTimeInterval = MetricsConfiguration.CONFIG_MAP
- .get(DisksCollector.class).samplingInterval;
- private StringBuilder value = new StringBuilder();
-
- public DisksCollector() {
- super(sTimeInterval, "DisksCollector");
- }
-
- @Override
- public String getMetricsPath(long startTime, String... keysPath) {
- // throw exception if keys.length is not equal to 0
- if (keysPath.length != 0) {
- throw new RuntimeException("keys length should be 0");
- }
-
- return PerformanceAnalyzerMetrics.generatePath(startTime, PerformanceAnalyzerMetrics.sDisksPath);
- }
-
- @Override
- public void collectMetrics(long startTime) {
- DiskMetricsGenerator diskMetricsGenerator = OSMetricsGeneratorFactory.getInstance().getDiskMetricsGenerator();
- diskMetricsGenerator.addSample();
-
- saveMetricValues(getMetrics(diskMetricsGenerator), startTime);
- }
-
- private Map<String, DiskMetrics> getMetricsMap(DiskMetricsGenerator diskMetricsGenerator) {
-
- Map<String, DiskMetrics> map = new HashMap<>();
-
- for (String disk : diskMetricsGenerator.getAllDisks()) {
- DiskMetrics diskMetrics = new DiskMetrics();
- diskMetrics.name = disk;
- diskMetrics.await = diskMetricsGenerator.getAwait(disk);
- diskMetrics.serviceRate = diskMetricsGenerator.getServiceRate(disk);
- diskMetrics.utilization = diskMetricsGenerator.getDiskUtilization(disk);
-
- map.put(disk, diskMetrics);
- }
-
- return map;
- }
-
- private String getMetrics(DiskMetricsGenerator diskMetricsGenerator) {
-
- Map<String, DiskMetrics> map = getMetricsMap(diskMetricsGenerator);
- value.setLength(0);
- value.append(PerformanceAnalyzerMetrics.getJsonCurrentMilliSeconds())
- .append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
- for (Map.Entry<String, DiskMetrics> entry : map.entrySet()) {
- value.append(entry.getValue().serialize())
- .append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
- }
- return value.toString();
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/HeapMetricsCollector.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/HeapMetricsCollector.java
deleted file mode 100644
index ced57d6f..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/HeapMetricsCollector.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.lang.management.MemoryUsage;
-import java.util.Map;
-import java.util.function.Supplier;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm.GCMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm.HeapMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.GCType;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HeapDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HeapValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsProcessor;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-
-
-public class HeapMetricsCollector extends PerformanceAnalyzerMetricsCollector implements MetricsProcessor {
- private static final Logger LOG = LogManager.getLogger(HeapMetricsCollector.class);
- public static final int SAMPLING_TIME_INTERVAL = MetricsConfiguration.CONFIG_MAP.get(HeapMetricsCollector.class).samplingInterval;
- private static final int KEYS_PATH_LENGTH = 0;
- private StringBuilder value;
-
-
- public HeapMetricsCollector() {
- super(SAMPLING_TIME_INTERVAL, "HeapMetrics");
- value = new StringBuilder();
- }
-
- @Override
- public void collectMetrics(long startTime) {
- GCMetrics.runGCMetrics();
-
- value.setLength(0);
- value.append(PerformanceAnalyzerMetrics.getJsonCurrentMilliSeconds())
- .append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
- value.append(new HeapStatus(GCType.TOT_YOUNG_GC.toString(),
- GCMetrics.getTotYoungGCCollectionCount(),
- GCMetrics.getTotYoungGCCollectionTime()).serialize()).append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
-
- value.append(new HeapStatus(GCType.TOT_FULL_GC.toString(),
- GCMetrics.getTotFullGCCollectionCount(),
- GCMetrics.getTotFullGCCollectionTime()).serialize()).append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
-
- for (Map.Entry<String, Supplier<MemoryUsage>> entry : HeapMetrics
- .getMemoryUsageSuppliers().entrySet()) {
- MemoryUsage memoryUsage = entry.getValue().get();
-
- value.append(
- new HeapStatus(entry.getKey(),
- memoryUsage.getCommitted(),
- memoryUsage.getInit(),
- memoryUsage.getMax(),
- memoryUsage.getUsed()).serialize()).append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
- }
-
- saveMetricValues(value.toString(), startTime);
- }
-
- @Override
- public String getMetricsPath(long startTime, String... keysPath) {
- // throw exception if keys.length is not equal to 0
- if (keysPath.length != KEYS_PATH_LENGTH) {
- throw new RuntimeException("keys length should be " + KEYS_PATH_LENGTH);
- }
-
- return PerformanceAnalyzerMetrics.generatePath(startTime, PerformanceAnalyzerMetrics.sHeapPath);
- }
-
- public static class HeapStatus extends MetricStatus {
- // GC type like survivor
- private final String type;
-
- // -2 means this metric is undefined for a memory pool. For example,
- // The memory pool Eden has no collectionCount metric.
- private static final long UNDEFINED = -2;
-
- // the total number of collections that have occurred
- private long collectionCount = UNDEFINED;
-
- // the approximate accumulated collection elapsed time in milliseconds
- private long collectionTime = UNDEFINED;
-
- // the amount of memory in bytes that is committed for the Java virtual machine to use
- private long committed = UNDEFINED;
-
- // the amount of memory in bytes that the Java virtual machine initially requests from the operating system for memory management
- private long init = UNDEFINED;
-
- // the maximum amount of memory in bytes that can be used for memory management
- private long max = UNDEFINED;
-
- // the amount of used memory in bytes
- private long used = UNDEFINED;
-
- public HeapStatus(String type,
- long collectionCount,
- long collectionTime) {
-
-
- this.type = type;
- this.collectionCount = collectionCount;
- this.collectionTime = collectionTime;
- }
-
- public HeapStatus(String type,
- long committed,
- long init,
- long max,
- long used) {
-
- this.type = type;
- this.committed = committed;
- this.init = init;
- this.max = max;
- this.used = used;
-
- }
-
- @JsonProperty(HeapDimension.Constants.TYPE_VALUE)
- public String getType() {
- return type;
- }
-
- @JsonProperty(HeapValue.Constants.COLLECTION_COUNT_VALUE)
- public long getCollectionCount() {
- return collectionCount;
- }
-
- @JsonProperty(HeapValue.Constants.COLLECTION_TIME_VALUE)
- public long getCollectionTime() {
- return collectionTime;
- }
-
- @JsonProperty(HeapValue.Constants.COMMITTED_VALUE)
- public long getCommitted() {
- return committed;
- }
-
- @JsonProperty(HeapValue.Constants.INIT_VALUE)
- public long getInit() {
- return init;
- }
-
- @JsonProperty(HeapValue.Constants.MAX_VALUE)
- public long getMax() {
- return max;
- }
-
- @JsonProperty(HeapValue.Constants.USED_VALUE)
- public long getUsed() {
- return used;
- }
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MasterServiceMetrics.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MasterServiceMetrics.java
index 797e1ed5..80a8123e 100644
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MasterServiceMetrics.java
+++ b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MasterServiceMetrics.java
@@ -84,4 +84,3 @@ public int getPendingTasksCount() {
}
}
}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MetricStatus.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MetricStatus.java
deleted file mode 100644
index 7e33f198..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MetricStatus.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.util.JsonConverter;
-
-public class MetricStatus {
-
- /**
- * converts any object to a JSON string and return that string
- * @return A string containing a JSON representation of the object
- */
- public String serialize() {
- return JsonConverter.writeValueAsString(this);
- }
-
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MetricsPurgeActivity.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MetricsPurgeActivity.java
deleted file mode 100644
index 05da2d28..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/MetricsPurgeActivity.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.io.File;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-
-public class MetricsPurgeActivity extends PerformanceAnalyzerMetricsCollector {
- public MetricsPurgeActivity() {
- super(MetricsConfiguration.CONFIG_MAP.get(MetricsPurgeActivity.class).samplingInterval,
- "MetricsPurgeActivity");
- }
-
- private static int purgeInterval = MetricsConfiguration.CONFIG_MAP.get(MetricsPurgeActivity.class).deletionInterval;
-
- @Override
- public void collectMetrics(long startTime) {
- File root = new File(PluginSettings.instance().getMetricsLocation());
-
- String[] children = root.list();
- if (children == null) {
- return;
- }
- for (int i = 0; i < children.length; i++) {
- if (Long.parseLong(children[i]) < PerformanceAnalyzerMetrics.getTimeInterval(startTime - purgeInterval)) {
- PerformanceAnalyzerMetrics.removeMetrics(new File(root, children[i]));
- }
- }
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetInterfaceSummary.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetInterfaceSummary.java
deleted file mode 100644
index 9c4a5b3d..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetInterfaceSummary.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.IPDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.IPValue;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-// all metrics are per-time-unit
-public class NetInterfaceSummary extends MetricStatus {
-
- public enum Direction {
- in, out;
- }
-
- private Direction direction;
- private double packetRate4;
- private double dropRate4;
- private double packetRate6;
- private double dropRate6;
- private double bps;
-
- public NetInterfaceSummary(Direction direction,
- double packetRate4,
- double dropRate4,
- double packetRate6,
- double dropRate6,
- double bps) {
- this.direction = direction;
- this.packetRate4 = packetRate4;
- this.dropRate4 = dropRate4;
- this.packetRate6 = packetRate6;
- this.dropRate6 = dropRate6;
- this.bps = bps;
- }
-
- @JsonProperty(IPDimension.Constants.DIRECTION_VALUE)
- public Direction getDirection() {
- return direction;
- }
-
- @JsonProperty(IPValue.Constants.PACKET_RATE4_VALUE)
- public double getPacketRate4() {
- return packetRate4;
- }
-
- @JsonProperty(IPValue.Constants.DROP_RATE4_VALUE)
- public double getDropRate4() {
- return dropRate4;
- }
-
- @JsonProperty(IPValue.Constants.PACKET_RATE6_VALUE)
- public double getPacketRate6() {
- return packetRate6;
- }
-
- @JsonProperty(IPValue.Constants.DROP_RATE6_VALUE)
- public double getDropRate6() {
- return dropRate6;
- }
-
- @JsonProperty(IPValue.Constants.THROUGHPUT_VALUE)
- public double getBps() {
- return bps;
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetworkE2ECollector.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetworkE2ECollector.java
deleted file mode 100644
index dff6e89f..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetworkE2ECollector.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.OSMetricsGeneratorFactory;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsProcessor;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.TCPMetricsGenerator;
-
-public class NetworkE2ECollector extends PerformanceAnalyzerMetricsCollector implements MetricsProcessor {
- private static final int sTimeInterval = MetricsConfiguration.CONFIG_MAP.get(NetworkE2ECollector.class).samplingInterval;
-
-
- public NetworkE2ECollector() {
- super(sTimeInterval, "NetworkE2ECollector");
- }
-
- @Override
- public void collectMetrics(long startTime) {
- TCPMetricsGenerator tcpMetricsGenerator = OSMetricsGeneratorFactory.getInstance().getTCPMetricsGenerator();
- tcpMetricsGenerator.addSample();
-
- String value = PerformanceAnalyzerMetrics.getJsonCurrentMilliSeconds()
- + PerformanceAnalyzerMetrics.sMetricNewLineDelimitor
- + getMetrics(tcpMetricsGenerator);
-
- saveMetricValues(value, startTime);
- }
-
- @Override
- public String getMetricsPath(long startTime, String... keysPath) {
- // throw exception if keys.length is not equal to 0
- if (keysPath.length != 0) {
- throw new RuntimeException("keys length should be 0");
- }
-
- return PerformanceAnalyzerMetrics.generatePath(startTime, PerformanceAnalyzerMetrics.sTCPPath);
- }
-
- private Map<String, TCPStatus> getMetricsMap(TCPMetricsGenerator tcpMetricsGenerator) {
- Map<String, TCPStatus> map = new HashMap<>();
-
- for (String dest : tcpMetricsGenerator.getAllDestionationIps()) {
- TCPStatus tcpStatus = new TCPStatus(
- dest,
- tcpMetricsGenerator.getNumberOfFlows(dest),
- tcpMetricsGenerator.getTransmitQueueSize(dest),
- tcpMetricsGenerator.getReceiveQueueSize(dest),
- tcpMetricsGenerator.getCurrentLost(dest),
- tcpMetricsGenerator.getSendCongestionWindow(dest),
- tcpMetricsGenerator.getSlowStartThreshold(dest)
- );
-
- map.put(dest, tcpStatus);
- }
-
- return map;
- }
-
- private String getMetrics(TCPMetricsGenerator tcpMetricsGenerator) {
-
- Map<String, TCPStatus> map = getMetricsMap(tcpMetricsGenerator);
- StringBuilder value = new StringBuilder();
- value.setLength(0);
- for (TCPStatus tcpStatus : map.values()) {
-
- value.append(tcpStatus.serialize())
- .append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
- }
-
- return value.toString();
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetworkInterfaceCollector.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetworkInterfaceCollector.java
deleted file mode 100644
index 899fa459..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/NetworkInterfaceCollector.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.OSMetricsGeneratorFactory;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.IPMetricsGenerator;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsProcessor;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-
-
-public class NetworkInterfaceCollector extends PerformanceAnalyzerMetricsCollector
-implements MetricsProcessor {
- private static final int sTimeInterval = MetricsConfiguration.CONFIG_MAP.get(NetworkInterfaceCollector.class).samplingInterval;
- private static final Logger LOG = LogManager.getLogger(NetworkInterfaceCollector.class);
- private StringBuilder ret = new StringBuilder();
-
- public NetworkInterfaceCollector() {
- super(sTimeInterval, "NetworkInterfaceCollector");
- }
-
- @Override
- public void collectMetrics(long startTime) {
-
- IPMetricsGenerator IPMetricsGenerator = OSMetricsGeneratorFactory.getInstance().getIPMetricsGenerator();
- IPMetricsGenerator.addSample();
-
- saveMetricValues(getMetrics(IPMetricsGenerator) + PerformanceAnalyzerMetrics.sMetricNewLineDelimitor, startTime);
- }
-
- @Override
- public String getMetricsPath(long startTime, String... keysPath) {
- // throw exception if keys.length is not equal to 0
- if (keysPath.length != 0) {
- throw new RuntimeException("keys length should be 0");
- }
-
- return PerformanceAnalyzerMetrics.generatePath(startTime, PerformanceAnalyzerMetrics.sIPPath);
- }
-
-
- private String getMetrics(IPMetricsGenerator IPMetricsGenerator) {
-
- ret.setLength(0);
- ret.append(PerformanceAnalyzerMetrics.getJsonCurrentMilliSeconds())
- .append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
-
- try {
- NetInterfaceSummary inNetwork = new NetInterfaceSummary(
- NetInterfaceSummary.Direction.in,
- IPMetricsGenerator.getInPacketRate4(),
- IPMetricsGenerator.getInDropRate4(),
- IPMetricsGenerator.getInPacketRate6(),
- IPMetricsGenerator.getInDropRate6(),
- IPMetricsGenerator.getInBps());
-
- NetInterfaceSummary outNetwork = new NetInterfaceSummary(
- NetInterfaceSummary.Direction.out,
- IPMetricsGenerator.getOutPacketRate4(),
- IPMetricsGenerator.getOutDropRate4(),
- IPMetricsGenerator.getOutPacketRate6(),
- IPMetricsGenerator.getOutDropRate6(),
- IPMetricsGenerator.getOutBps());
-
- ret.append(inNetwork.serialize()).append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
- ret.append(outNetwork.serialize()).append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor);
- }catch (Exception e) {
- LOG.debug("Exception in NetworkInterfaceCollector: {} with ExceptionCode: {}",
- () -> e.toString(), () -> StatExceptionCode.NETWORK_COLLECTION_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.NETWORK_COLLECTION_ERROR);
- }
-
- return ret.toString();
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/OSMetricsCollector.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/OSMetricsCollector.java
deleted file mode 100644
index ffe8c92c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/OSMetricsCollector.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.util.Map;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.OSMetricsGeneratorFactory;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm.ThreadList;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.OSMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsProcessor;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.CPUPagingActivityGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.DiskIOMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.OSMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.SchedMetricsGenerator;
-
-public class OSMetricsCollector extends PerformanceAnalyzerMetricsCollector implements MetricsProcessor {
- public static final int SAMPLING_TIME_INTERVAL = MetricsConfiguration.CONFIG_MAP.get(ThreadList.class).samplingInterval;
- private static final int KEYS_PATH_LENGTH = 1;
- private StringBuilder value;
- private OSMetricsGenerator osMetricsGenerator;
-
- public enum MetaDataFields {
- threadName
- }
-
- public OSMetricsCollector() {
- super(SAMPLING_TIME_INTERVAL, "OSMetrics");
- value = new StringBuilder();
- osMetricsGenerator = OSMetricsGeneratorFactory.getInstance();
- }
-
- @Override
- public void collectMetrics(long startTime) {
-
- CPUPagingActivityGenerator threadCPUPagingActivityGenerator = osMetricsGenerator.getPagingActivityGenerator();
- threadCPUPagingActivityGenerator.addSample();
-
- SchedMetricsGenerator schedMetricsGenerator = osMetricsGenerator.getSchedMetricsGenerator();
- schedMetricsGenerator.addSample();
-
- Map<Long, ThreadList.ThreadState> threadStates = ThreadList.getNativeTidMap();
-
- DiskIOMetricsGenerator diskIOMetricsGenerator = osMetricsGenerator.getDiskIOMetricsGenerator();
- diskIOMetricsGenerator.addSample();
-
- for (String threadId : osMetricsGenerator.getAllThreadIds()) {
- value.setLength(0);
- value.append(PerformanceAnalyzerMetrics.getCurrentTimeMetric())
- .append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.CPU_UTILIZATION)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadCPUPagingActivityGenerator
- .getCPUUtilization(threadId));
-
- if (threadCPUPagingActivityGenerator.hasPagingActivity(threadId)) {
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.PAGING_MAJ_FLT_RATE)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadCPUPagingActivityGenerator
- .getMajorFault(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.PAGING_MIN_FLT_RATE)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadCPUPagingActivityGenerator
- .getMinorFault(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.PAGING_RSS)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadCPUPagingActivityGenerator
- .getResidentSetSize(threadId));
- }
-
- if (schedMetricsGenerator.hasSchedMetrics(threadId)) {
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.SCHED_RUNTIME)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(schedMetricsGenerator.getAvgRuntime(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.SCHED_WAITTIME)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(schedMetricsGenerator.getAvgWaittime(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.SCHED_CTX_RATE)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(schedMetricsGenerator
- .getContextSwitchRate(threadId));
- }
-
- ThreadList.ThreadState threadState = threadStates
- .get(Long.valueOf(threadId));
- if (threadState != null) {
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.HEAP_ALLOC_RATE)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadState.heapAllocRate);
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(MetaDataFields.threadName.toString())
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadState.threadName);
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.THREAD_BLOCKED_TIME)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadState.avgBlockedTime);
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.THREAD_BLOCKED_EVENT)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(threadState.blockedCount);
- }
-
- if (diskIOMetricsGenerator.hasDiskIOMetrics(threadId)) {
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.IO_READ_THROUGHPUT)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(diskIOMetricsGenerator
- .getAvgReadThroughputBps(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.IO_WRITE_THROUGHPUT)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(diskIOMetricsGenerator
- .getAvgWriteThroughputBps(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.IO_TOT_THROUGHPUT)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(diskIOMetricsGenerator
- .getAvgTotalThroughputBps(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.IO_READ_SYSCALL_RATE)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(diskIOMetricsGenerator
- .getAvgReadSyscallRate(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.IO_WRITE_SYSCALL_RATE)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(diskIOMetricsGenerator
- .getAvgWriteSyscallRate(threadId));
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor)
- .append(OSMetrics.IO_TOTAL_SYSCALL_RATE)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(diskIOMetricsGenerator
- .getAvgTotalSyscallRate(threadId));
- }
-
- saveMetricValues(value.toString(), startTime, threadId);
- }
- }
-
- @Override
- public String getMetricsPath(long startTime, String... keysPath) {
- // throw exception if keys.length is not equal to 1...which is thread ID
- if (keysPath.length != KEYS_PATH_LENGTH) {
- throw new RuntimeException("keys length should be " + KEYS_PATH_LENGTH);
- }
- return PerformanceAnalyzerMetrics.generatePath(startTime, PerformanceAnalyzerMetrics.sThreadsPath,
- keysPath[0], PerformanceAnalyzerMetrics.sOSPath);
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/PerformanceAnalyzerMetricsCollector.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/PerformanceAnalyzerMetricsCollector.java
deleted file mode 100644
index 0e66832a..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/PerformanceAnalyzerMetricsCollector.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerPlugin;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import java.util.concurrent.atomic.AtomicBoolean;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-
-public abstract class PerformanceAnalyzerMetricsCollector implements Runnable {
- private static final Logger LOG = LogManager.getLogger(PerformanceAnalyzerMetricsCollector.class);
- private int timeInterval;
- private long startTime;
- private String collectorName;
-
- protected PerformanceAnalyzerMetricsCollector(int timeInterval, String collectorName) {
- this.timeInterval = timeInterval;
- this.collectorName = collectorName;
- }
-
- private AtomicBoolean bInProgress = new AtomicBoolean(false);
-
- public int getTimeInterval() {
- return timeInterval;
- }
-
- public boolean inProgress() {
- return bInProgress.get();
- }
-
- public String getCollectorName() {
- return collectorName;
- }
-
- abstract void collectMetrics(long startTime);
-
- public void setStartTime(long startTime) {
- this.startTime = startTime;
- bInProgress.set(true);
- }
-
- public void run() {
- try {
- PerformanceAnalyzerPlugin.invokePrivileged(() -> collectMetrics(startTime));
- } catch (Exception ex) {
- //- should not be any...but in case, absorbing here
- //- logging...we shouldn't be doing as it will slow down; as well as fill up the log. Need to find a way to catch these
- LOG.debug("Error In Collect Metrics: {} with ExceptionCode: {}",
- () -> ex.toString(), () -> StatExceptionCode.OTHER_COLLECTION_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.OTHER_COLLECTION_ERROR);
- } finally {
- bInProgress.set(false);
- }
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/ScheduledMetricCollectorsExecutor.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/ScheduledMetricCollectorsExecutor.java
deleted file mode 100644
index 783ace0e..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/ScheduledMetricCollectorsExecutor.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.config.PerformanceAnalyzerConfigAction;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-
-public class ScheduledMetricCollectorsExecutor extends Thread {
- private static final Logger LOG = LogManager.getLogger(ScheduledMetricCollectorsExecutor.class);
- private final int collectorThreadCount;
- private static final int DEFAULT_COLLECTOR_THREAD_COUNT = 5;
- private static final int COLLECTOR_THREAD_KEEPALIVE_SECS = 1000;
- private final boolean checkFeatureDisabledFlag;
-
- private int minTimeIntervalToSleep = Integer.MAX_VALUE;
- private Map<PerformanceAnalyzerMetricsCollector, Long> metricsCollectors;
- private ThreadPoolExecutor metricsCollectorsTP;
-
- public ScheduledMetricCollectorsExecutor(int collectorThreadCount, boolean checkFeatureDisabledFlag) {
- metricsCollectors = new HashMap<>();
- metricsCollectorsTP = null;
- this.collectorThreadCount = collectorThreadCount;
- this.checkFeatureDisabledFlag = checkFeatureDisabledFlag;
- }
-
- public ScheduledMetricCollectorsExecutor() {
- this(DEFAULT_COLLECTOR_THREAD_COUNT, true);
- }
-
- public void addScheduledMetricCollector(PerformanceAnalyzerMetricsCollector task) {
- metricsCollectors.put(task, System.currentTimeMillis() + task.getTimeInterval());
- if (task.getTimeInterval() < minTimeIntervalToSleep) {
- minTimeIntervalToSleep = task.getTimeInterval();
- }
- }
-
- public void run() {
- if (metricsCollectorsTP == null) {
- metricsCollectorsTP = new ThreadPoolExecutor(collectorThreadCount,
- collectorThreadCount,
- COLLECTOR_THREAD_KEEPALIVE_SECS,
- TimeUnit.SECONDS,
- new ArrayBlockingQueue<>(metricsCollectors.size()));
- }
-
- long prevStartTimestamp = System.currentTimeMillis();
-
- while (true) {
- try {
- long millisToSleep = minTimeIntervalToSleep - System.currentTimeMillis() + prevStartTimestamp;
- if (millisToSleep > 0) {
- Thread.sleep(millisToSleep);
- }
- } catch (Exception ex) {
- LOG.error("Exception in Thread Sleep", ex);
- }
-
- prevStartTimestamp = System.currentTimeMillis();
-
- if (!checkFeatureDisabledFlag ||
- PerformanceAnalyzerConfigAction.getInstance() != null && PerformanceAnalyzerConfigAction.getInstance().isFeatureEnabled()) {
- long currentTime = System.currentTimeMillis();
-
- for (Map.Entry<PerformanceAnalyzerMetricsCollector, Long> entry : metricsCollectors.entrySet()) {
- if (entry.getValue() <= currentTime) {
- PerformanceAnalyzerMetricsCollector collector = entry.getKey();
- metricsCollectors.put(collector, entry.getValue() + collector.getTimeInterval());
- if (!collector.inProgress()) {
- collector.setStartTime(currentTime);
- metricsCollectorsTP.execute(collector);
- } else {
- LOG.info("Collector {} is still in progress, so skipping this Interval", collector.getCollectorName());
- }
- }
- }
- }
- }
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/StatExceptionCode.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/StatExceptionCode.java
deleted file mode 100644
index 2429792c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/StatExceptionCode.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-public enum StatExceptionCode {
- TOTAL_ERROR("TotalError"),
- METRICS_WRITE_ERROR("MetricsWriteError"),
- METRICS_REMOVE_ERROR("MetricsRemoveError"),
- JVM_ATTACH_ERROR("JvmAttachErrror"),
- MASTER_METRICS_ERROR("MasterMetricsError"),
- DISK_METRICS_ERROR("DiskMetricsError"),
- THREAD_IO_ERROR("ThreadIOError"),
- SCHEMA_PARSER_ERROR("SchemaParserError"),
- JSON_PARSER_ERROR("JsonParserError"),
- NETWORK_COLLECTION_ERROR("NetworkCollectionError"),
- NODESTATS_COLLECTION_ERROR("NodeStatsCollectionError"),
- OTHER_COLLECTION_ERROR("OtherCollectionError"),
- REQUEST_ERROR("RequestError"),
- REQUEST_REMOTE_ERROR("RequestRemoteError"),
- READER_PARSER_ERROR("ReaderParserError"),
- READER_RESTART_PROCESSING("ReaderRestartProcessing"),
- OTHER("Other");
-
- private final String value;
-
- StatExceptionCode(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/StatsCollector.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/StatsCollector.java
deleted file mode 100644
index 57399aeb..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/StatsCollector.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import java.io.FileInputStream;
-import java.io.InputStream;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Vector;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.ESResources;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.google.common.annotations.VisibleForTesting;
-
-public class StatsCollector extends PerformanceAnalyzerMetricsCollector {
- private static final String LOG_ENTRY_INIT = "------------------------------------------------------------------------";
- private static final String LOG_ENTRY_END = "EOE";
- private static final String LOG_LINE_BREAK = "\n";
- private static final double MILLISECONDS_TO_SECONDS_DIVISOR = 1000D;
-
- private static final Logger STATS_LOGGER = LogManager.getLogger("stats_log");
- private static final Logger GENERAL_LOG = LogManager.getLogger(StatsCollector.class);
- private static StatsCollector statsCollector = null;
- public static String STATS_TYPE = "plugin-stats-metadata";
-
- private final Map<String, String> metadata;
- private Map<String, AtomicInteger> counters = new ConcurrentHashMap<>();
- private Date objectCreationTime = new Date();
-
- private List<StatExceptionCode> defaultExceptionCodes = new Vector<>();
-
- public static StatsCollector instance() {
- if(statsCollector == null) {
- synchronized(StatsCollector.class) {
- if(statsCollector == null) {
- statsCollector = new StatsCollector(loadMetadata(PluginSettings.instance().getSettingValue(STATS_TYPE, STATS_TYPE)));
- }
- }
- }
-
- return statsCollector;
- }
-
- @VisibleForTesting
- Map<String, AtomicInteger> getCounters() {
- return counters;
- }
- public void logException() {
- logException(StatExceptionCode.OTHER);
- }
-
- public void logException(StatExceptionCode statExceptionCode) {
- incCounter(statExceptionCode.toString());
- incErrorCounter();
- }
-
- public void logMetric(final String metricName) {
- incCounter(metricName);
- }
-
- public void logStatsRecord(Map<String, AtomicInteger> counters, Map<String, String> statsdata,
- Map<String, Double> latencies, long startTimeMillis, long endTimeMillis) {
- writeStats(metadata, counters, statsdata, latencies, startTimeMillis, endTimeMillis);
- }
-
- private static Map<String, String> loadMetadata(String fileLocation) {
- Map<String, String> retVal = new ConcurrentHashMap<>();
-
- if(fileLocation != null) {
- Properties props = new Properties();
-
- try (InputStream input = new FileInputStream(
- ESResources.INSTANCE.getPluginFileLocation() + PluginSettings.CONFIG_FILES_PATH + fileLocation); ) {
- // load properties file
- props.load(input);
- } catch(Exception ex) {
- GENERAL_LOG.error("Error in loading metadata for fileLocation: {}", fileLocation);
- }
-
- props.forEach((key, value) -> retVal.put((String)key, (String)value));
- }
-
- return retVal;
- }
-
- private StatsCollector(Map<String, String> metadata) {
- super(MetricsConfiguration.CONFIG_MAP.get(StatsCollector.class).samplingInterval,
- "StatsCollector");
- this.metadata = metadata;
- defaultExceptionCodes.add(StatExceptionCode.TOTAL_ERROR);
- }
-
- public void addDefaultExceptionCode(StatExceptionCode statExceptionCode) {
- defaultExceptionCodes.add(statExceptionCode);
- }
-
- @Override
- public void collectMetrics(long startTime) {
- Map<String, AtomicInteger> currentCounters = counters;
- counters = new ConcurrentHashMap<>();
-
- //currentCounters.putIfAbsent(StatExceptionCode.TOTAL_ERROR.toString(), new AtomicInteger(0));
-
- for(StatExceptionCode statExceptionCode : defaultExceptionCodes) {
- currentCounters.putIfAbsent(statExceptionCode.toString(), new AtomicInteger(0));
- }
-
- writeStats(metadata, currentCounters, null, null, objectCreationTime.getTime(), new Date().getTime());
- objectCreationTime = new Date();
- }
-
- private void incCounter(String counterName) {
- AtomicInteger val = counters.putIfAbsent(counterName, new AtomicInteger(1));
- if (val != null) {
- val.getAndIncrement();
- }
- }
-
- private void incErrorCounter() {
- AtomicInteger all_val = counters.putIfAbsent(StatExceptionCode.TOTAL_ERROR.toString(), new AtomicInteger(1));
- if (all_val != null) {
- all_val.getAndIncrement();
- }
- }
-
- private static void writeStats(Map<String, String> metadata, Map<String, AtomicInteger> counters,
- Map<String, String> statsdata, Map<String, Double> latencies,
- long startTimeMillis, long endTimeMillis) {
- StringBuilder builder = new StringBuilder();
- builder.append(LOG_ENTRY_INIT + LOG_LINE_BREAK);
- logValues(metadata, builder);
- logValues(statsdata, builder);
- logTimeMetrics(startTimeMillis, endTimeMillis, builder);
-
- Map<String, Double> tmpLatencies;
-
- if(latencies == null) {
- tmpLatencies = new ConcurrentHashMap<>();
- } else {
- tmpLatencies = new ConcurrentHashMap<>(latencies);
- }
-
- tmpLatencies.put("total-time", (double)endTimeMillis-startTimeMillis);
- addEntry("Timing", getLatencyMetrics(tmpLatencies), builder);
-
-
- addEntry("Counters", getCountersString(counters), builder);
- builder.append(LOG_ENTRY_END);// + LOG_LINE_BREAK);
- STATS_LOGGER.info(builder.toString());
- }
-
- private static String getCountersString(Map<String, AtomicInteger> counters) {
- StringBuilder builder = new StringBuilder();
- if (counters == null || counters.isEmpty()) {
- return "";
- }
- for (Map.Entry<String, AtomicInteger> counter : counters.entrySet()) {
- builder.append(counter.getKey()).append("=").append(counter.getValue().get()).append(",");
- }
- builder.delete(builder.length() - 1, builder.length());
- return builder.toString();
- }
-
- private static void logTimeMetrics(long startTimeMillis, long endTimeMillis, StringBuilder builder) {
- // Date Example: Wed, 20 Mar 2013 15:07:51 GMT
- SimpleDateFormat dateFormat = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z", Locale.ROOT);
- addEntry("StartTime", String.format(Locale.ROOT, "%.3f", startTimeMillis / MILLISECONDS_TO_SECONDS_DIVISOR), builder);
- addEntry("EndTime", dateFormat.format(new Date(endTimeMillis)), builder);
- addEntry("Time", (endTimeMillis - startTimeMillis) + " msecs", builder);
- }
-
- private static void logValues(Map<String, String> values, StringBuilder sb) {
- if(values == null) {
- return;
- }
- for (Map.Entry<String, String> entry : values.entrySet()) {
- addEntry(entry.getKey(), entry.getValue(), sb);
- }
- }
-
- private static void addEntry(String key, Object value, StringBuilder sb) {
- sb.append(key).append('=').append(value).append(LOG_LINE_BREAK);
- }
-
- private static String getLatencyMetrics(Map<String, Double> values) {
- StringBuilder builder = new StringBuilder();
- if (values == null || values.isEmpty()) {
- return "";
- }
- for (Map.Entry<String, Double> value : values.entrySet()) {
- getTimingInfo(value.getKey(), value.getValue(), builder);
- }
- builder.delete(builder.length() - 1, builder.length());
- return builder.toString();
- }
-
- private static void getTimingInfo(String timerName, double latency, StringBuilder builder) {
- getTimingInfo(timerName, latency, builder, 1);
- }
-
- private static void getTimingInfo(String timerName, double latency, StringBuilder builder, int attempts) {
- builder.append(timerName).append(":").append(latency).append("/").append(attempts).append(",");
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/TCPStatus.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/TCPStatus.java
deleted file mode 100644
index b3228abb..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/collectors/TCPStatus.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.TCPDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.TCPValue;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class TCPStatus extends MetricStatus {
-
- private String dest;
-
- private int numFlows;
-
- private double txQ;
-
- private double rxQ;
-
- private double curLost;
-
- private double sndCWND;
-
- // make this field private so that Jackson uses getter method name
- private double ssThresh;
-
- public TCPStatus(String dest, int numFlows, double txQ, double rxQ,
- double curLost, double sndCWND, double sSThresh) {
- super();
- this.dest = dest;
- this.numFlows = numFlows;
- this.txQ = txQ;
- this.rxQ = rxQ;
- this.curLost = curLost;
- this.sndCWND = sndCWND;
- this.ssThresh = sSThresh;
- }
-
- @JsonProperty(TCPDimension.Constants.DEST_VALUE)
- public String getDest() {
- return dest;
- }
-
- @JsonProperty(TCPValue.Constants.NUM_FLOWS_VALUE)
- public int getNumFlows() {
- return numFlows;
- }
-
- @JsonProperty(TCPValue.Constants.TXQ_VALUE)
- public double getTxQ() {
- return txQ;
- }
-
- @JsonProperty(TCPValue.Constants.RXQ_VALUE)
- public double getRxQ() {
- return rxQ;
- }
-
- @JsonProperty(TCPValue.Constants.CUR_LOST_VALUE)
- public double getCurLost() {
- return curLost;
- }
-
- @JsonProperty(TCPValue.Constants.SEND_CWND_VALUE)
- public double getSndCWND() {
- return sndCWND;
- }
-
- @JsonProperty(TCPValue.Constants.SSTHRESH_VALUE)
- public double getSSThresh() {
- return ssThresh;
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/ConfigFatalException.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/ConfigFatalException.java
deleted file mode 100644
index c87a6984..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/ConfigFatalException.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.config;
-
-class ConfigFatalException extends Exception {
- ConfigFatalException(String message) {
- super(message);
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/ConfigFileException.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/ConfigFileException.java
deleted file mode 100644
index 9c88cc5f..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/ConfigFileException.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.config;
-
-class ConfigFileException extends Exception {
- ConfigFileException(Throwable cause) {
- super(cause);
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/PluginSettings.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/PluginSettings.java
deleted file mode 100644
index b6163029..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/PluginSettings.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.config;
-
-import java.io.File;
-import java.util.Properties;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.ConfigStatus;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerPlugin;
-
-public class PluginSettings {
- private static final Logger LOG = LogManager.getLogger(PluginSettings.class);
-
- private static PluginSettings instance;
- public static final String CONFIG_FILES_PATH = "pa_config/";
- private static final String DEFAULT_CONFIG_FILE_PATH = "pa_config/performance-analyzer.properties";
- private static final String METRICS_LOCATION_KEY = "metrics-location";
- private static final String METRICS_LOCATION_DEFAULT = "/dev/shm/performanceanalyzer/";
- private static final String DELETION_INTERVAL_KEY = "metrics-deletion-interval";
- private static final int DELETION_INTERVAL_DEFAULT = 1;
- private static final int DELETION_INTERVAL_MIN = 1;
- private static final int DELETION_INTERVAL_MAX = 60;
- private static final String HTTPS_ENABLED = "https-enabled";
-
- /**
- * Determines whether the metricsdb files should be cleaned up.
- */
- public static final String DB_FILE_CLEANUP_CONF_NAME = "cleanup-metrics-db-files";
-
- private String metricsLocation;
- private int metricsDeletionInterval;
-
- /**
- * If set to true, the metricsdb files are cleaned up, or else the on-disk files are left out.
- */
- private boolean shouldCleanupMetricsDBFiles;
- private boolean httpsEnabled;
- private Properties settings;
- private final String configFilePath;
-
- static {
- PerformanceAnalyzerPlugin.invokePrivilegedAndLogError(() -> createInstance());
- }
-
- public String getMetricsLocation() {
- return metricsLocation;
- }
-
- public void setMetricsLocation(final String metricsLocation) {
- this.metricsLocation = metricsLocation;
- }
-
- public int getMetricsDeletionInterval() {
- return metricsDeletionInterval * 60 * 1000;
- }
-
- public String getSettingValue(String settingName) {
- return settings.getProperty(settingName);
- }
-
- public String getSettingValue(String settingName, String defaultValue) {
- return settings.getProperty(settingName, defaultValue);
- }
-
- private void loadHttpsEnabled() throws Exception {
- String httpsEnabledString = settings.getProperty(HTTPS_ENABLED, "False");
- if (httpsEnabledString == null) {
- httpsEnabled = false;
- }
- try {
- httpsEnabled = Boolean.parseBoolean(httpsEnabledString);
- } catch (Exception ex) {
- LOG.error("Unable to parse httpsEnabled property with value {}", httpsEnabledString);
- httpsEnabled = false;
- }
- }
-
- public boolean getHttpsEnabled() {
- return this.httpsEnabled;
- }
-
- public boolean shouldCleanupMetricsDBFiles() {
- return shouldCleanupMetricsDBFiles;
- }
-
- private PluginSettings(String cfPath) {
- metricsLocation = METRICS_LOCATION_DEFAULT;
- metricsDeletionInterval = DELETION_INTERVAL_DEFAULT;
- if (cfPath == null || cfPath.isEmpty()) {
- this.configFilePath = DEFAULT_CONFIG_FILE_PATH;
- } else {
- this.configFilePath = cfPath;
- }
-
- settings = new Properties();
- try {
- settings = getSettingsFromFile(this.configFilePath);
- loadMetricsDeletionIntervalFromConfig();
- loadMetricsLocationFromConfig();
- loadHttpsEnabled();
- loadMetricsDBFilesCleanupEnabled();
- } catch (ConfigFileException e) {
- LOG.error("Loading config file {} failed with error: {}. Using default values.",
- this.configFilePath, e.toString());
- } catch (ConfigFatalException e) {
- LOG.error("Having issue to load all config items. Disabling plugin.", e);
- ConfigStatus.INSTANCE.setConfigurationInvalid();
- } catch (Exception e) {
- LOG.error("Unexpected exception while initializing config. Disabling plugin.", e);
- ConfigStatus.INSTANCE.setConfigurationInvalid();
- }
- LOG.info("Config: metricsLocation: {}, metricsDeletionInterval: {}, httpsEnabled: {}," +
- " cleanup-metrics-db-files: {}",
- metricsLocation, metricsDeletionInterval, httpsEnabled, shouldCleanupMetricsDBFiles);
- }
-
- public static PluginSettings instance() {
- return instance;
- }
-
- private static void createInstance() {
- String cfPath = System.getProperty("configFilePath");
- instance = new PluginSettings(cfPath);
- }
-
- private static Properties getSettingsFromFile(String filePath) throws ConfigFileException {
- try {
- return SettingsHelper.getSettings(filePath);
- } catch (Exception e) {
- throw new ConfigFileException(e);
- }
- }
-
- private void loadMetricsLocationFromConfig()
- throws ConfigFatalException {
- if (!settings.containsKey(METRICS_LOCATION_KEY)) {
- LOG.info("Cannot find metrics-location, using default value. {}", METRICS_LOCATION_DEFAULT);
- }
-
- metricsLocation = settings.getProperty(METRICS_LOCATION_KEY, METRICS_LOCATION_DEFAULT);
- validateOrCreateDir(metricsLocation);
- }
-
- private static void validateOrCreateDir(String path) throws ConfigFatalException {
- File dict = new File(path);
-
- boolean dictCreated = true;
- if (!dict.exists()) {
- dictCreated = dict.mkdir();
- LOG.info("Trying to create directory {}.", path);
- }
-
- boolean valid = dictCreated && dict.isDirectory() && dict.canWrite();
- if (!valid) {
- LOG.error("Invalid metrics location {}." +
- " Created: {} (Expect True), Directory: {} (Expect True)," +
- " CanWrite: {} (Expect True)",
- path, dict.exists(), dict.isDirectory(), dict.canWrite());
- throw new ConfigFatalException("Having issue to use path: " + path);
- }
- }
-
- private void loadMetricsDeletionIntervalFromConfig() {
- if (!settings.containsKey(DELETION_INTERVAL_KEY)) {
- return;
- }
-
- try {
- int interval = Integer.parseInt(settings.getProperty(DELETION_INTERVAL_KEY));
- if (interval < DELETION_INTERVAL_MIN || interval > DELETION_INTERVAL_MAX) {
- LOG.error("metrics-deletion-interval out of range. Value should in ({}-{}). Using default value {}.",
- DELETION_INTERVAL_MIN, DELETION_INTERVAL_MAX, metricsDeletionInterval);
- return;
- }
- metricsDeletionInterval = interval;
- } catch (NumberFormatException e) {
- LOG.error(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Invalid metrics-deletion-interval. Using default value {}.",
- metricsDeletionInterval),
- e);
- }
- }
- private void loadMetricsDBFilesCleanupEnabled() {
- String cleanupEnabledString = settings.getProperty(DB_FILE_CLEANUP_CONF_NAME, "True");
- try {
- shouldCleanupMetricsDBFiles = Boolean.parseBoolean(cleanupEnabledString);
- } catch (Exception ex) {
- LOG.error("Unable to parse {} property with value {}. Only true/false expected.",
- DB_FILE_CLEANUP_CONF_NAME, cleanupEnabledString);
-
- // In case of exception, we go with the safe default that the files will always be cleaned up.
- shouldCleanupMetricsDBFiles = true;
- }
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/SettingsHelper.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/SettingsHelper.java
deleted file mode 100644
index 6d9292a1..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/SettingsHelper.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.config;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.ESResources;
-
-import java.io.InputStream;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.util.Properties;
-
-public class SettingsHelper {
- public static Properties getSettings(final String fileRelativePath) throws IOException {
- Properties prop = new Properties();
-
- try (InputStream input = new FileInputStream(ESResources.INSTANCE.getPluginFileLocation() + fileRelativePath); ) {
- // load a properties file
- prop.load(input);
- }
-
- return prop;
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/TroubleshootingConfig.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/TroubleshootingConfig.java
deleted file mode 100644
index 9b57bb52..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/config/TroubleshootingConfig.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.config;
-
-public class TroubleshootingConfig {
- public static final boolean enableDevAssert = false;
-
- public static final boolean getEnableDevAssert() {
- return enableDevAssert;
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/http_action/config/PerformanceAnalyzerConfigAction.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/http_action/config/PerformanceAnalyzerConfigAction.java
index 1dfa37df..42d1ed6f 100644
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/http_action/config/PerformanceAnalyzerConfigAction.java
+++ b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/http_action/config/PerformanceAnalyzerConfigAction.java
@@ -16,6 +16,8 @@
package com.amazon.opendistro.elasticsearch.performanceanalyzer.http_action.config;
import com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerPlugin;
+import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.ScheduledMetricCollectorsExecutor;
+
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
@@ -50,6 +52,7 @@ public class PerformanceAnalyzerConfigAction extends BaseRestHandler {
private static PerformanceAnalyzerConfigAction instance = null;
private boolean isInitialized = false;
private boolean featureEanbledDefaultValue = true;
+ private ScheduledMetricCollectorsExecutor scheduledMetricCollectorsExecutor;
public static PerformanceAnalyzerConfigAction getInstance() {
return instance;
@@ -61,10 +64,12 @@ public static void setInstance(PerformanceAnalyzerConfigAction performanceanalyz
private static final String METRIC_ENABLED_CONF_FILENAME = "performance_analyzer_enabled.conf";
@Inject
- public PerformanceAnalyzerConfigAction(Settings settings, RestController controller) {
+ public PerformanceAnalyzerConfigAction(Settings settings, RestController controller,
+ ScheduledMetricCollectorsExecutor scheduledMetricCollectorsExecutor) {
super(settings);
controller.registerHandler(org.elasticsearch.rest.RestRequest.Method.GET, "/_opendistro/_performanceanalyzer/config", this);
controller.registerHandler(org.elasticsearch.rest.RestRequest.Method.POST, "/_opendistro/_performanceanalyzer/config", this);
+ this.scheduledMetricCollectorsExecutor = scheduledMetricCollectorsExecutor ;
this.featureEnabled = getFeatureEnabledFromConf();
LOG.info("PerformanceAnalyzer Enabled: {}", this.featureEnabled);
}
@@ -82,6 +87,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
bValue, this.featureEnabled);
if (this.featureEnabled != bValue) {
this.featureEnabled = (Boolean) value;
+ if( scheduledMetricCollectorsExecutor != null) {
+ scheduledMetricCollectorsExecutor.setEnabled(this.featureEnabled);
+ }
saveFeatureEnabledToConf(this.featureEnabled);
}
}
@@ -138,10 +146,16 @@ private boolean getFeatureEnabledFromConf() {
String nextLine = sc.nextLine();
featureEnabled = Boolean.parseBoolean(nextLine);
isInitialized = true;
+ if ( scheduledMetricCollectorsExecutor != null) {
+ scheduledMetricCollectorsExecutor.setEnabled(featureEnabled);
+ }
} catch (java.nio.file.NoSuchFileException ex) {
saveFeatureEnabledToConf(featureEanbledDefaultValue);
isInitialized = true;
featureEnabled = featureEanbledDefaultValue;
+ if( scheduledMetricCollectorsExecutor != null) {
+ scheduledMetricCollectorsExecutor.setEnabled(featureEnabled);
+ }
} catch (Exception e) {
LOG.error("Error reading Feature Enabled from Conf file", e);
featureEnabled = featureEanbledDefaultValue;
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/Disks.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/Disks.java
deleted file mode 100644
index a714c96b..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/Disks.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet;
-
-import java.io.File;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerPlugin;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.DiskMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux.LinuxDiskMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.SchemaFileParser;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.DiskMetrics;
-
-public class Disks {
- private static Map<String, Map<String, Object>> diskKVMap = new HashMap<>();
- private static Map<String, Map<String, Object>> olddiskKVMap = new HashMap<>();
- private static long kvTimestamp = 0;
- private static long oldkvTimestamp = 0;
- private static Set<String> diskList = new HashSet<>();
- private static final Logger LOG = LogManager.getLogger(Disks.class);
- private static LinuxDiskMetricsGenerator linuxDiskMetricsHandler = new LinuxDiskMetricsGenerator();
-
- private static String statKeys[] = {
- "majno", //1
- "minno",
- "name",
- "rdone",
- "rmerged",
- "rsectors",
- "rtime",
- "wdone",
- "wmerged",
- "wsectors", //10
- "wtime",
- "inprogressIO",
- "IOtime",
- "weightedIOtime"
- };
-
- private static SchemaFileParser.FieldTypes statTypes[] = {
- SchemaFileParser.FieldTypes.INT, //1
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.STRING,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG, //10
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG
- };
-
- static {
- PerformanceAnalyzerPlugin.invokePrivileged(() -> listDisks());
- oldkvTimestamp = System.currentTimeMillis();
- kvTimestamp = oldkvTimestamp;
- }
-
- private static StringBuilder value = new StringBuilder();
-
- private static void listDisks() {
- try {
- File file = new File("/sys/block");
- for (File dfile : file.listFiles()) {
- if (!dfile.getCanonicalPath().contains("/virtual/")) {
- diskList.add(dfile.getName());
- }
- }
- } catch(Exception e) {
- LOG.debug("Exception in calling listDisks with details: {}", () -> e.toString());
- }
- }
-
- public static DiskMetricsGenerator getDiskMetricsHandler() {
- return linuxDiskMetricsHandler;
- }
-
- public static void addSample() {
- olddiskKVMap.clear();
- olddiskKVMap.putAll(diskKVMap);
- diskKVMap.clear();
-
- SchemaFileParser parser =
- new SchemaFileParser("/proc/diskstats", statKeys, statTypes);
- List<Map<String, Object>> sampleList = parser.parseMultiple();
-
- for (Map<String, Object> sample: sampleList) {
- String diskname = (String)(sample.get("name"));
- if (!diskList.contains(diskname)) {
- diskKVMap.put(diskname, sample);
- }
- }
-
- oldkvTimestamp = kvTimestamp;
- kvTimestamp = System.currentTimeMillis();
-
- calculateDiskMetrics();
- }
-
- private static void calculateDiskMetrics() {
-
- linuxDiskMetricsHandler.setDiskMetricsMap(getMetricsMap());
- }
-
- public static Map<String, DiskMetrics> getMetricsMap() {
- Map<String, DiskMetrics> map = new HashMap<>();
- if (kvTimestamp > oldkvTimestamp) {
- for (String disk : diskKVMap.keySet()) {
- Map<String, Object> m = diskKVMap.get(disk);
- Map<String, Object> mold = olddiskKVMap.get(disk);
- if (mold != null) {
- DiskMetrics dm = new DiskMetrics();
- dm.name = (String) m.get("name");
- double rwdeltatime = 1.0 * ((long) m.get("rtime") + (long) m.get("wtime")
- - (long) mold.get("rtime") - (long) mold.get("wtime"));
- double rwdeltaiops = 1.0 * ((long) m.get("rdone") + (long) m.get("wdone")
- - (long) mold.get("rdone") - (long) mold.get("wdone"));
- double rwdeltasectors = 1.0 * ((long) m.get("rsectors") + (long) m.get("wsectors")
- - (long) mold.get("rsectors") - (long) mold.get("wsectors"));
-
- dm.utilization = rwdeltatime / (kvTimestamp - oldkvTimestamp);
- dm.await = (rwdeltaiops > 0) ? rwdeltatime / rwdeltaiops : 0;
- dm.serviceRate = (rwdeltatime > 0) ? rwdeltasectors * 512 * 1.0e-3 / rwdeltatime : 0;
-
- map.put(disk, dm);
- }
- }
- }
- return map;
- }
-
- public static void runOnce() {
- addSample();
- System.out.println("disks: "+ getMetricsMap());
- }
-}
-
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/NetworkE2E.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/NetworkE2E.java
deleted file mode 100644
index bfb8986a..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/NetworkE2E.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux.LinuxTCPMetricsGenerator;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.OSGlobals;
-import com.google.common.annotations.VisibleForTesting;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-
-public class NetworkE2E {
- /* Data sources:
- /proc/net/tcp, /proc/net/tcp6 and /proc/pid/fd/*
- intersection of these gives a list of flows
- owned by the process. net/tcp gives metrics
- (by src-dest pair) around queues, retx's
- and TCP sndwnd.
- */
-
- private static final Logger LOG = LogManager.getLogger(
- NetworkE2E.class);
- private static String pid = OSGlobals.getPid();
-
- static class TCPFlowMetrics {
- String destIP;
-
- long txQueue;
- long rxQueue;
- long currentLost;
- long sendCWND;
- long SSThresh;
- }
-
- static class destTCPFlowMetrics {
- long txQueueTot;
- long rxQueueTot;
- long currentLostTot;
- long sendCWNDTot;
- long SSThreshTot;
- int numFlows;
- destTCPFlowMetrics(TCPFlowMetrics m) {
- txQueueTot = m.txQueue;
- rxQueueTot = m.rxQueue;
- currentLostTot = m.currentLost;
- sendCWNDTot = m.sendCWND;
- SSThreshTot = m.SSThresh;
- numFlows = 1;
- }
- }
-
- private static Set<String> inodeSocketList
- = new HashSet<>();
- private static Map<String, TCPFlowMetrics> inodeFlowMetricsMap
- = new HashMap<>();
- private static Map<String, destTCPFlowMetrics> destnodeFlowMetricsMap
- = new HashMap<>();
- private static LinuxTCPMetricsGenerator linuxTCPMetricsHandler = new LinuxTCPMetricsGenerator();
-
- private static StringBuilder value = new StringBuilder();
-
- static void listSockets() {
- File self = new File("/proc/" + pid + "/fd");
- File[] filesList = self.listFiles();
- for (File f : filesList) {
- // no check for file, as this dir is all files/symlinks
- String target = null;
- try {
- Path targetp = Files.readSymbolicLink(Paths.get(f.getCanonicalPath()));
- target = targetp.toString();
- } catch (Exception e) {
- continue;
- }
- if (target.contains("socket:")) {
- target = target.split("socket:\\[")[1];
- target = target.split("\\]")[0];
- inodeSocketList.add(target);
- }
- }
- }
-
- private static void generateMap(String line, String ver) {
- String[] toks = line.trim().split("\\s+");
- if (!inodeSocketList.contains(toks[9])) { // inode
- return;
- }
- TCPFlowMetrics m = new TCPFlowMetrics();
- m.destIP = toks[2].split(":")[0];
- m.txQueue = Long.decode("0x" + toks[4].split(":")[0]);
- m.rxQueue = Long.decode("0x" + toks[4].split(":")[1]);
- m.currentLost = Long.decode("0x" + toks[6]);
- if (toks.length > 16) {
- m.sendCWND = Long.parseLong(toks[15]);
- m.SSThresh = Long.parseLong(toks[16]);
- } else {
- m.sendCWND = -1;
- m.SSThresh = -1;
- }
- inodeFlowMetricsMap.put(toks[9], m);
- }
-
- private static void mapTCPMetrics(String ver) {
- int ln = 0;
- try (FileReader fileReader = new FileReader(new File(ver));
- BufferedReader bufferedReader = new BufferedReader(fileReader)) {
- String line = null;
- while ((line = bufferedReader.readLine()) != null) {
- if (ln != 0) { // first line is keys
- generateMap(line, ver);
- }
- ln++;
- }
- } catch (Exception e) {
- LOG.debug("Error in mapTCPMetrics: {} with ExceptionCode: {}",
- () -> e, () -> StatExceptionCode.NETWORK_COLLECTION_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.NETWORK_COLLECTION_ERROR);
- }
- }
-
- private static void mapTCPMetrics() {
- mapTCPMetrics("/proc/net/tcp");
- mapTCPMetrics("/proc/net/tcp6");
- }
-
- private static void clearAll() {
- inodeSocketList.clear();
- inodeFlowMetricsMap.clear();
- destnodeFlowMetricsMap.clear();
- }
-
- private static void computeSummary() {
- for (String inode : inodeFlowMetricsMap.keySet()) {
- TCPFlowMetrics m = inodeFlowMetricsMap.get(inode);
- destTCPFlowMetrics exist = destnodeFlowMetricsMap.get(m.destIP);
- if (exist == null) {
- destnodeFlowMetricsMap.put(m.destIP, new destTCPFlowMetrics(m));
- } else {
- // check for "-1"s and add to total only if it is not -1
- exist.numFlows++;
- exist.txQueueTot += (m.txQueue != -1 ? m.txQueue : 0);
- exist.rxQueueTot += (m.rxQueue != -1 ? m.rxQueue : 0);
- exist.currentLostTot += (m.currentLost != -1 ? m.currentLost : 0);
- exist.sendCWNDTot += (m.sendCWND != -1 ? m.sendCWND : 0);
- exist.SSThreshTot += (m.SSThresh != -1 ? m.SSThresh : 0);
- }
- }
-
- calculateTCPMetrics();
- }
-
- protected static void calculateTCPMetrics() {
-
- Map<String, double[]> localMap = new HashMap<>();
- for (String dest : destnodeFlowMetricsMap.keySet()) {
- destTCPFlowMetrics m = destnodeFlowMetricsMap.get(dest);
-
- double[] metrics = new double[6];
- metrics[0] = m.numFlows;
- metrics[1] = m.txQueueTot * 1.0 / m.numFlows;
- metrics[2] = m.rxQueueTot * 1.0 / m.numFlows;
- metrics[3] = m.currentLostTot * 1.0 / m.numFlows;
- metrics[4] = m.sendCWNDTot * 1.0 / m.numFlows;
- metrics[5] = m.SSThreshTot * 1.0 / m.numFlows;
-
- localMap.put(dest, metrics);
- }
-
- linuxTCPMetricsHandler.setTCPMetrics(localMap);
- }
-
- public static LinuxTCPMetricsGenerator getTCPMetricsHandler() {
-
- return linuxTCPMetricsHandler;
- }
-
- public static void addSample() {
- clearAll();
- listSockets();
- mapTCPMetrics();
- computeSummary();
- }
-
- public static void runOnce() {
- clearAll();
- listSockets();
- mapTCPMetrics();
- computeSummary();
- }
-
- @VisibleForTesting
- protected static void setDestnodeFlowMetricsMap(
- Map<String, destTCPFlowMetrics> destnodeFlowMetricsMap) {
- NetworkE2E.destnodeFlowMetricsMap = destnodeFlowMetricsMap;
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/NetworkInterface.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/NetworkInterface.java
deleted file mode 100644
index 8b5a3304..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/hwnet/NetworkInterface.java
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NetInterfaceSummary;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux.LinuxIPMetricsGenerator;
-import com.google.common.annotations.VisibleForTesting;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-
-public class NetworkInterface {
- private static final Logger LOG = LogManager.getLogger(NetworkInterface.class);
-
- /* Data sources:
- /proc/net/snmp, /proc/net/snmp6, /proc/net/dev
- measures tcp and ip-layer pathologies.
- SNMP fields of interest (see RFCs 2011 and 1213):
- - [ip6]inReceives: total including errors
- - [ip6]inDelivers: sent to next layer (including ICMP)
- - [ip6]outRequests: sent from previous layer
- - [ip6]outDiscards + [ip6]outNoRoutes: sender-side drops
- */
-
- static class NetInterfaceMetrics {
- Map<String, Long> PHYmetrics =
- new HashMap<>();
- Map<String, Long> IPmetrics =
- new HashMap<>();
- // these three are currently unused;
- // leaving them commented for now.
- /*Map<String, Long> TCPmetrics =
- new HashMap<>();
- Map<String, Long> UDPmetrics =
- new HashMap<>();
- Map<String, Long> ICMPmetrics =
- new HashMap<>();*/
- public void clearAll() {
- PHYmetrics.clear();
- IPmetrics.clear();
- /*TCPmetrics.clear();
- UDPmetrics.clear();
- ICMPmetrics.clear();*/
- }
- public void putAll(NetInterfaceMetrics m) {
- PHYmetrics.putAll(m.PHYmetrics);
- IPmetrics.putAll(m.IPmetrics);
- /*TCPmetrics.putAll(m.TCPmetrics);
- UDPmetrics.putAll(m.UDPmetrics);
- ICMPmetrics.putAll(m.ICMPmetrics);*/
- }
- }
- private static NetInterfaceMetrics currentMetrics = new NetInterfaceMetrics();
- private static NetInterfaceMetrics oldMetrics = new NetInterfaceMetrics();
- private static Map<String, Long> currentMetrics6 = new HashMap<>();
- private static Map<String, Long> oldMetrics6 = new HashMap<>();
- private static long kvTimestamp = 0;
- private static long oldkvTimestamp = 0;
-
- private static StringBuilder ret = new StringBuilder();
-
- private static String[] IPkeys = null;
-// static private String[] TCPkeys = null;
-// static private String[] UDPkeys = null;
-// static private String[] ICMPkeys = null;
-
- private static LinuxIPMetricsGenerator linuxIPMetricsGenerator = new LinuxIPMetricsGenerator();
-
- static {
- addSampleHelper();
- }
-
- public static LinuxIPMetricsGenerator getLinuxIPMetricsGenerator() {
- return linuxIPMetricsGenerator;
- }
-
-
- protected static void calculateNetworkMetrics() {
-
- if (kvTimestamp <= oldkvTimestamp) {
- linuxIPMetricsGenerator.setInNetworkInterfaceSummary(null);
- linuxIPMetricsGenerator.setOutNetworkInterfaceSummary(null);
- return;
- }
-
- Map<String, Long> curphy = currentMetrics.PHYmetrics;
- Map<String, Long> curipv4 = currentMetrics.IPmetrics;
- Map<String, Long> oldphy = oldMetrics.PHYmetrics;
- Map<String, Long> oldipv4 = oldMetrics.IPmetrics;
-
- long nin = curipv4.get("InReceives") - oldipv4.get("InReceives");
- long nout = curipv4.get("OutRequests") - oldipv4.get("OutRequests");
- long delivin = curipv4.get("InDelivers") - oldipv4.get("InDelivers");
- long dropout = curipv4.get("OutDiscards") + curipv4.get("OutNoRoutes")
- - oldipv4.get("OutDiscards") - oldipv4.get("OutNoRoutes");
- long nin6 = currentMetrics6.get("Ip6InReceives") - oldMetrics6.get("Ip6InReceives");
- long nout6 = currentMetrics6.get("Ip6OutRequests") - oldMetrics6.get("Ip6OutRequests");
- long delivin6 = currentMetrics6.get("Ip6InDelivers") - oldMetrics6.get("Ip6InDelivers");
- long dropout6 = currentMetrics6.get("Ip6OutDiscards") + currentMetrics6.get("Ip6OutNoRoutes")
- - oldMetrics6.get("Ip6OutDiscards") - oldMetrics6.get("Ip6OutNoRoutes");
-
- long timeDelta = kvTimestamp - oldkvTimestamp;
- double inbps = 8*1.0e3*(curphy.get("inbytes") - oldphy.get("inbytes")) / timeDelta;
- double outbps = 8*1.0e3*(curphy.get("outbytes") - oldphy.get("outbytes")) / timeDelta;
- double inPacketRate4 = 1.0e3*(nin) / timeDelta;
- double outPacketRate4 = 1.0e3*(nout) / timeDelta;
- double inDropRate4 = 1.0e3*(nin - delivin) / timeDelta;
- double outDropRate4 = 1.0e3*(dropout) / timeDelta;
- double inPacketRate6 = 1.0e3*(nin6) / timeDelta;
- double outPacketRate6 = 1.0e3*(nout6) / timeDelta;
- double inDropRate6 = 1.0e3*(nin6 - delivin6) / timeDelta;
- double outDropRate6 = 1.0e3*(dropout6) / timeDelta;
-
- NetInterfaceSummary inNetwork = new NetInterfaceSummary(
- NetInterfaceSummary.Direction.in,
- inPacketRate4,
- inDropRate4,
- inPacketRate6,
- inDropRate6,
- inbps
- );
-
- NetInterfaceSummary outNetwork = new NetInterfaceSummary(
- NetInterfaceSummary.Direction.out,
- outPacketRate4,
- outDropRate4,
- outPacketRate6,
- outDropRate6,
- outbps
- );
-
- linuxIPMetricsGenerator.setInNetworkInterfaceSummary(inNetwork);
- linuxIPMetricsGenerator.setOutNetworkInterfaceSummary(outNetwork);
- }
-
- private static void getKeys(String line) {
- if (IPkeys != null) {
- //{ && TCPkeys != null &&
- //UDPkeys != null && ICMPkeys != null) {
- return;
- }
- if (line.startsWith("Ip:")) {
- IPkeys = line.split("\\s+");
- } /*else if (line.startsWith("Icmp:")) {
- ICMPkeys = line.split("\\s+");
- } else if (line.startsWith("Tcp:")) {
- TCPkeys = line.split("\\s+");
- } else if (line.startsWith("Udp:")) {
- UDPkeys = line.split("\\s+");
- }*/
- }
-
- private static void generateMap(String line) {
- Map<String, Long> map = null;
- String[] keys = null;
- if (line.startsWith("Ip:")) {
- map = currentMetrics.IPmetrics;
- keys = IPkeys;
- } /*else if (line.startsWith("Icmp:")) {
- map = currentMetrics.ICMPmetrics;
- keys = ICMPkeys;
- } else if (line.startsWith("Tcp:")) {
- map = currentMetrics.TCPmetrics;
- keys = TCPkeys;
- } else if (line.startsWith("Udp:")) {
- map = currentMetrics.UDPmetrics;
- keys = UDPkeys;
- }*/
- if (keys != null) {
- generateMap(line, keys, map);
- }
- }
-
- private static void generateMap(String line, String[] keys, Map<String, Long> map) {
- String[] values = line.split("\\s+");
- int count = values.length;
- map.put(keys[0], 0L);
- for (int i = 1; i < count; i++) {
- map.put(keys[i], Long.parseLong(values[i]));
- }
- }
-
- private static void addSample4() {
- int ln = 0;
-
- oldMetrics.clearAll();
- oldMetrics.putAll(currentMetrics);
- currentMetrics.clearAll();
- oldkvTimestamp = kvTimestamp;
- kvTimestamp = System.currentTimeMillis();
-
- try (FileReader fileReader = new FileReader(new File("/proc/net/snmp"));
- BufferedReader bufferedReader = new BufferedReader(fileReader);) {
- String line = null;
- while ((line = bufferedReader.readLine()) != null) {
- if (ln % 2 == 0) { // keys
- getKeys(line);
- } else {
- generateMap(line);
- }
- ln++;
- }
- } catch (Exception e) {
- LOG.debug("Exception in calling addSample4 with details: {} with ExceptionCode: {}",
- () -> e.toString(), () -> StatExceptionCode.NETWORK_COLLECTION_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.NETWORK_COLLECTION_ERROR);
- }
- }
-
- private static void addSample6() {
- oldMetrics6.clear();
- oldMetrics6.putAll(currentMetrics6);
- currentMetrics6.clear();
-
- try (FileReader fileReader = new FileReader(new File("/proc/net/snmp6"));
- BufferedReader bufferedReader = new BufferedReader(fileReader);) {
- String line = null;
- while ((line = bufferedReader.readLine()) != null) {
- String[] toks = line.split("[ \\t]+");
- if (toks.length > 1) {
- currentMetrics6.put(toks[0], Long.parseLong(toks[1]));
- }
- }
- } catch (Exception e) {
- LOG.debug("Exception in calling addSample6 with details: {} with ExceptionCode: {}",
- () -> e.toString(), () -> StatExceptionCode.NETWORK_COLLECTION_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.NETWORK_COLLECTION_ERROR);
- }
- }
-
- // this assumes that addSample4() is called
- private static void addDeviceStats() {
- try (FileReader fileReader = new FileReader(new File("/proc/net/dev"));
- BufferedReader bufferedReader = new BufferedReader(fileReader);) {
- String line = null;
- long intotbytes = 0, outtotbytes = 0;
- long intotpackets = 0, outtotpackets = 0;
- while ((line = bufferedReader.readLine()) != null) {
- if (line.contains("Receive") || line.contains("packets")) {
- continue;
- }
- String[] toks = line.trim().split(" +");
- intotbytes += Long.parseLong(toks[1]);
- intotpackets += Long.parseLong(toks[2]);
- outtotbytes += Long.parseLong(toks[9]);
- outtotpackets += Long.parseLong(toks[10]);
- }
- currentMetrics.PHYmetrics.put("inbytes", intotbytes);
- currentMetrics.PHYmetrics.put("inpackets", intotpackets);
- currentMetrics.PHYmetrics.put("outbytes", outtotbytes);
- currentMetrics.PHYmetrics.put("outpackets", outtotpackets);
- } catch (Exception e) {
- LOG.debug("Exception in calling addDeviceStats with details: {} with ExceptionCode: {}",
- () -> e.toString(), () -> StatExceptionCode.NETWORK_COLLECTION_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.NETWORK_COLLECTION_ERROR);
- }
- }
-
- public static void addSample() {
- addSampleHelper();
- calculateNetworkMetrics();
- }
-
- private static synchronized void addSampleHelper() {
- addSample4();
- addSample6();
- addDeviceStats();
- }
-
- public static void runOnce() {
- addSample();
- }
-
- @VisibleForTesting
- Map<String, Long> getCurrentPhyMetric() {
- return currentMetrics.PHYmetrics;
- }
-
- @VisibleForTesting
- Map<String, Long> getCurrentIpMetric() {
- return currentMetrics.IPmetrics;
- }
-
- @VisibleForTesting
- Map<String, Long> getOldPhyMetric() {
- return oldMetrics.PHYmetrics;
- }
-
- @VisibleForTesting
- Map<String, Long> getOldIpMetric() {
- return oldMetrics.IPmetrics;
- }
-
- @VisibleForTesting
- Map<String, Long> getCurrentMetrics6() {
- return currentMetrics6;
- }
-
- @VisibleForTesting
- Map<String, Long> getOldMetrics6() {
- return oldMetrics6;
- }
-
- @VisibleForTesting
- void putCurrentPhyMetric(String key, Long value) {
- currentMetrics.PHYmetrics.put(key, value);
- }
-
- @VisibleForTesting
- void putCurrentIpMetric(String key, Long value) {
- currentMetrics.IPmetrics.put(key, value);
- }
-
- @VisibleForTesting
- void putOldPhyMetric(String key, Long value) {
- oldMetrics.PHYmetrics.put(key, value);
- }
-
- @VisibleForTesting
- void putOldIpMetric(String key, Long value) {
- oldMetrics.IPmetrics.put(key, value);
- }
-
- @VisibleForTesting
- void putCurrentMetrics6(String key, Long value) {
- currentMetrics6.put(key, value);
- }
-
- @VisibleForTesting
- void putOldMetrics6(String key, Long value) {
- oldMetrics6.put(key, value);
- }
-
- @VisibleForTesting
- static void setKvTimestamp(long value) {
- NetworkInterface.kvTimestamp = value;
- }
-
- @VisibleForTesting
- static void setOldkvTimestamp(long oldkvTimestamp) {
- NetworkInterface.oldkvTimestamp = oldkvTimestamp;
- }
-
- @VisibleForTesting
- static long getKvTimestamp() {
- return kvTimestamp;
- }
-
- @VisibleForTesting
- static long getOldkvTimestamp() {
- return oldkvTimestamp;
- }
-
-}
-
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/GCMetrics.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/GCMetrics.java
deleted file mode 100644
index a086c8ad..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/GCMetrics.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm;
-
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-public class GCMetrics {
- private static GarbageCollectorMXBean fullGC = null;
- private static GarbageCollectorMXBean youngGC = null;
-
- private static long totYoungGCCollectionCount = 0;
- private static long totYoungGCCollectionTime = 0;
- private static long totFullGCCollectionCount = 0;
- private static long totFullGCCollectionTime = 0;
-
- private static long lastYoungGCCollectionCount = 0;
- private static long lastYoungGCCollectionTime = 0;
- private static long lastFullGCCollectionCount = 0;
- private static long lastFullGCCollectionTime = 0;
- private static final Logger LOGGER = LogManager.getLogger(GCMetrics.class);
-
- static {
- for (GarbageCollectorMXBean item : ManagementFactory.getGarbageCollectorMXBeans()) {
- if ("ConcurrentMarkSweep".equals(item.getName())
- || "MarkSweepCompact".equals(item.getName())
- || "PS MarkSweep".equals(item.getName())
- || "G1 Old Generation".equals(item.getName())
- || "Garbage collection optimized for short pausetimes Old Collector".equals(item.getName())
- || "Garbage collection optimized for throughput Old Collector".equals(item.getName())
- || "Garbage collection optimized for deterministic pausetimes Old Collector".equals(item.getName())
- ) {
- fullGC = item;
- } else if ("ParNew".equals(item.getName())
- || "Copy".equals(item.getName())
- || "PS Scavenge".equals(item.getName())
- || "G1 Young Generation".equals(item.getName())
- || "Garbage collection optimized for short pausetimes Young Collector".equals(item.getName())
- || "Garbage collection optimized for throughput Young Collector".equals(item.getName())
- || "Garbage collection optimized for deterministic pausetimes Young Collector".equals(item.getName())
- ) {
- youngGC = item;
- } else {
- LOGGER.error("MX bean missing: {}", () -> item.getName());
- }
- }
- }
-
- public static long getTotYoungGCCollectionCount() {
- return totYoungGCCollectionCount;
- }
-
- public static long getTotYoungGCCollectionTime() {
- return totYoungGCCollectionTime;
- }
-
- public static long getTotFullGCCollectionCount() {
- return totFullGCCollectionCount;
- }
-
- public static long getTotFullGCCollectionTime() {
- return totFullGCCollectionTime;
- }
-
- private static long getYoungGCCollectionCount() {
- if (youngGC == null) {
- return 0;
- }
- return youngGC.getCollectionCount();
- }
-
- private static long getYoungGCCollectionTime() {
- if (youngGC == null) {
- return 0;
- }
- return youngGC.getCollectionTime();
- }
-
- private static long getFullGCCollectionCount() {
- if (fullGC == null) {
- return 0;
- }
- return fullGC.getCollectionCount();
- }
-
- private static long getFullGCCollectionTime() {
- if (fullGC == null) {
- return 0;
- }
- return fullGC.getCollectionTime();
- }
-
- public static void runGCMetrics() {
- long YoungGCCollectionCount = getYoungGCCollectionCount();
- long YoungGCCollectionTime = getYoungGCCollectionTime();
- long FullGCCollectionCount = getFullGCCollectionCount();
- long FullGCCollectionTime = getFullGCCollectionTime();
-
- totYoungGCCollectionCount =
- YoungGCCollectionCount - lastYoungGCCollectionCount;
- totYoungGCCollectionTime =
- YoungGCCollectionTime - lastYoungGCCollectionTime;
- totFullGCCollectionCount =
- FullGCCollectionCount - lastFullGCCollectionCount;
- totFullGCCollectionTime =
- FullGCCollectionTime - lastFullGCCollectionTime;
-
- lastYoungGCCollectionCount = YoungGCCollectionCount;
- lastYoungGCCollectionTime = YoungGCCollectionTime;
- lastFullGCCollectionCount = FullGCCollectionCount;
- lastFullGCCollectionTime = FullGCCollectionTime;
- }
-
- static void printGCMetrics() {
- if (lastYoungGCCollectionCount >= 0) {
- System.out.println("GC:: yC:" + getTotYoungGCCollectionCount() +
- " yT:" + getTotYoungGCCollectionTime() +
- " oC:" + getTotFullGCCollectionCount() +
- " oT:" + getTotFullGCCollectionTime());
- }
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/HeapMetrics.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/HeapMetrics.java
deleted file mode 100644
index 9197665c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/HeapMetrics.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm;
-
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryMXBean;
-import java.lang.management.MemoryPoolMXBean;
-import java.lang.management.MemoryUsage;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Supplier;
-
-public class HeapMetrics {
- private static final Map<String, Supplier<MemoryUsage>> memoryUsageSuppliers;
-
- static {
- memoryUsageSuppliers = new HashMap<>();
- MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
- if (memoryMXBean != null) {
- memoryUsageSuppliers.put("Heap", () -> memoryMXBean.getHeapMemoryUsage());
- memoryUsageSuppliers.put("NonHeap", () -> memoryMXBean.getNonHeapMemoryUsage());
- }
-
- List<MemoryPoolMXBean> list = ManagementFactory.getMemoryPoolMXBeans();
- for (MemoryPoolMXBean item : list) {
- if ("CMS Perm Gen".equals(item.getName())
- || "Perm Gen".equals(item.getName())
- || "PS Perm Gen".equals(item.getName())
- || "G1 Perm Gen".equals(item.getName())
- || "Metaspace".equals(item.getName())) {
- memoryUsageSuppliers.put("PermGen", () -> item.getUsage());
- } else if ("CMS Old Gen".equals(item.getName())
- || "Tenured Gen".equals(item.getName())
- || "PS Old Gen".equals(item.getName())
- || "G1 Old Gen".equals(item.getName())) {
- memoryUsageSuppliers.put("OldGen", () -> item.getUsage());
- } else if ("Par Eden Space".equals(item.getName())
- || "Eden Space".equals(item.getName())
- || "PS Eden Space".equals(item.getName())
- || "G1 Eden".equals(item.getName())) {
- memoryUsageSuppliers.put("Eden", () -> item.getUsage());
- } else if ("Par Survivor Space".equals(item.getName())
- || "Survivor Space".equals(item.getName())
- || "PS Survivor Space".equals(item.getName())
- || "G1 Survivor".equals(item.getName())) {
- memoryUsageSuppliers.put("Survivor", () -> item.getUsage());
- }
- }
- }
-
- public static Map<String, Supplier<MemoryUsage>> getMemoryUsageSuppliers() {
- return memoryUsageSuppliers;
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/ThreadList.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/ThreadList.java
deleted file mode 100644
index ba9d94e1..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/jvm/ThreadList.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm;
-
-import java.io.BufferedReader;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerPlugin;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.OSMetricsGeneratorFactory;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.sun.tools.attach.VirtualMachine;
-
-import sun.tools.attach.HotSpotVirtualMachine;
-
-
-/** Traverses and prints the stack traces for all Java threads in the
- * remote VM */
-public class ThreadList {
- private static final Map<Long, String> jTidNameMap = new ConcurrentHashMap<>();
- private static final Map<Long, ThreadState> nativeTidMap = new ConcurrentHashMap<>();
- private static final Map<Long, ThreadState> oldNativeTidMap = new ConcurrentHashMap<>();
- private static final Map<Long, ThreadState> jTidMap = new ConcurrentHashMap<>();
- private static final Map<String, ThreadState> nameMap = new ConcurrentHashMap<>();
- private static final String pid = OSMetricsGeneratorFactory.getInstance().getPid();
- static final Logger LOGGER = LogManager.getLogger(ThreadList.class);
- static final int samplingInterval = MetricsConfiguration.CONFIG_MAP.get(ThreadList.class).samplingInterval;
- private static final long minRunInterval = samplingInterval;
- private static final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
- private static final Pattern linePattern = Pattern.compile("\"([^\"]*)\"");
- private static long lastRunTime = 0;
-
- public static class ThreadState {
- public long javaTid;
- public long nativeTid;
- public long heapUsage;
- public String threadName;
- public String tState;
- public Thread.State state;
- public long blockedCount;
- public long blockedTime;
-
- public double heapAllocRate;
- public double avgBlockedTime;
-
- ThreadState() {
- javaTid = -1;
- nativeTid = -1;
- heapUsage = -1;
- heapAllocRate = 0;
- blockedCount = 0;
- blockedTime = 0;
- avgBlockedTime = 0;
- threadName = "";
- tState = "";
- }
-
- @Override
- public String toString() {
- return new StringBuilder().append("javatid:").append(javaTid).append(" nativetid:")
- .append(nativeTid).append(" name:").append(threadName).append(" state:")
- .append(tState).append("(").append(state).append(")").append(" heaprate: ").append(heapAllocRate)
- .append(" bTime: ").append(avgBlockedTime).append(":").append(blockedCount).toString();
- }
- }
-
- public static Map<Long, ThreadState> getNativeTidMap() {
- synchronized (ThreadList.class) {
- if (System.currentTimeMillis() > lastRunTime + minRunInterval) {
- runThreadDump(pid, new String[0]);
- }
- //- sending a copy so that if runThreadDump next iteration clears it; caller still has the state at the call time
- //- not too expensive as this is only being called from Scheduled Collectors (only once in few seconds)
- return new HashMap<>(nativeTidMap);
- }
- }
-
-
- public static ThreadState getThreadState(long threadId) {
- ThreadState retVal = jTidMap.get(threadId);
-
- if (retVal != null) {
- return retVal;
- }
-
- synchronized (ThreadList.class) {
- retVal = jTidMap.get(threadId);
-
- if (retVal != null) {
- return retVal;
- }
-
- runThreadDump(pid, new String[0]);
- }
-
- return jTidMap.get(threadId);
- }
-
- // Attach to pid and perform a thread dump
- private static void runAttachDump(String pid, String[] args) {
- VirtualMachine vm = null;
- try {
- vm = VirtualMachine.attach(pid);
- } catch (Exception ex) {
- LOGGER.debug("Error in Attaching to VM with exception: {} with ExceptionCode: {}",
- () -> ex.toString(), () -> StatExceptionCode.JVM_ATTACH_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.JVM_ATTACH_ERROR);
- return;
- }
-
- try (InputStream in = ((HotSpotVirtualMachine) vm).remoteDataDump((Object[]) args);) {
- createMap(in);
- } catch (Exception ex) {
- LOGGER.debug("Cannot list threads with exception: {} with ExceptionCode: {}",
- () -> ex.toString(), () -> StatExceptionCode.JVM_ATTACH_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.JVM_ATTACH_ERROR);
- }
-
- try {
- vm.detach();
- } catch (Exception ex) {
- LOGGER.debug("Failed in VM Detach with exception: {} with ExceptionCode: {}",
- () -> ex.toString(), () -> StatExceptionCode.JVM_ATTACH_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.JVM_ATTACH_ERROR);
- }
- }
-
- //ThreadMXBean-based info for tid, name and allocs
- private static void runMXDump() {
- long[] ids = threadBean.getAllThreadIds();
- ThreadInfo[] infos = threadBean.getThreadInfo(ids);
- for (ThreadInfo info : infos) {
- long id = info.getThreadId();
- String name = info.getThreadName();
- Thread.State state = info.getThreadState();
-
- // following captures cumulative allocated bytes + TLAB used bytes
- // and it is cumulative
- long mem = ((com.sun.management.ThreadMXBean) threadBean).getThreadAllocatedBytes(id);
-
- ThreadState t = jTidMap.get(id);
- if (t == null) {
- continue;
- }
- t.heapUsage = mem;
- t.state = state;
- t.blockedCount = info.getBlockedCount();
- t.blockedTime = info.getBlockedTime();
- ThreadHistory.add(t.nativeTid,
- (state == Thread.State.BLOCKED) ? samplingInterval : 0);
-
- long curRunTime = System.currentTimeMillis();
- ThreadState oldt = oldNativeTidMap.get(t.nativeTid);
- if (curRunTime > lastRunTime && oldt != null) {
- t.heapAllocRate = Math.max(t.heapUsage - oldt.heapUsage, 0) * 1.0e3
- /(curRunTime - lastRunTime);
- if (t.blockedTime != -1 && t.blockedCount > oldt.blockedCount) {
- t.avgBlockedTime = 1.0e-3 * (t.blockedTime - oldt.blockedTime)
- / (t.blockedCount - oldt.blockedCount);
- } else {
- CircularLongArray arr = ThreadHistory.tidHistoryMap.get(t.nativeTid);
- // NOTE: this is an upper bound
- if (arr != null) {
- t.avgBlockedTime = 1.0 * arr.getAvgValue() / samplingInterval;
- }
- }
- }
- jTidNameMap.put(id, name);
- }
- ThreadHistory.cleanup();
- }
-
- static void runThreadDump(String pid, String[] args) {
- jTidNameMap.clear();
- oldNativeTidMap.putAll(nativeTidMap);
- nativeTidMap.clear();
- jTidMap.clear();
- nameMap.clear();
-
- //TODO: make this map update atomic
- PerformanceAnalyzerPlugin.invokePrivileged(() -> runAttachDump(pid, args));
- runMXDump();
-
- lastRunTime = System.currentTimeMillis();
- }
-
- private static void parseLine(String line) {
- String[] tokens = line.split(" os_prio=[0-9]* ");
- ThreadState t = new ThreadState();
- t.javaTid = -1;
-
- Matcher m = linePattern.matcher(tokens[0]);
- if (!m.find()) {
- t.threadName = tokens[0];
- } else {
- t.threadName = m.group(1);
- if (!tokens[0].equals("\"" + t.threadName + "\"")) {
- t.javaTid = Long.parseLong(
- tokens[0].split(Pattern.quote("\"" + t.threadName + "\" "))[1].
- split(" ")[0].
- split("#")[1]);
- }
- }
-
- tokens = tokens[1].split(" ");
- for (String token : tokens) {
- String[] keyValuePare = token.split("=");
- if (keyValuePare.length < 2) {
- continue;
- }
- if (t.javaTid == -1 && keyValuePare[0].equals("tid")) {
- t.javaTid = Long.decode(keyValuePare[1]);
- }
- if (keyValuePare[0].equals("nid")) {
- t.nativeTid = Long.decode(keyValuePare[1]);
- }
- }
- t.tState = tokens[2]; //TODO: stuff like "in Object.wait()"
- nativeTidMap.put(t.nativeTid, t);
- jTidMap.put(t.javaTid, t);
- nameMap.put(t.threadName, t); //XXX: we assume no collisions
- }
-
- private static void createMap(InputStream in) throws Exception {
- BufferedReader br = new BufferedReader(new InputStreamReader(in));
- String line = null;
- while ((line = br.readLine()) != null) {
- if (line.contains("tid=")) {
- parseLine(line);
- }
- }
- }
-
- // currently stores thread states to track locking periods
- static class ThreadHistory {
- public static Map<Long, CircularLongArray> tidHistoryMap
- = new HashMap<>();
- private static final int HISTORY_SIZE = 60; // 60 * samplingInterval
- public static void add(long tid, long value) {
- CircularLongArray arr = tidHistoryMap.get(tid);
- if (arr == null) {
- arr = new CircularLongArray(HISTORY_SIZE);
- arr.add(value);
- tidHistoryMap.put(tid, arr);
- } else {
- arr.add(value);
- }
- }
- public static void cleanup() {
- long curTime = System.currentTimeMillis();
- for (Iterator<Map.Entry<Long, CircularLongArray>> it =
- tidHistoryMap.entrySet().iterator();
- it.hasNext();) {
- Map.Entry<Long, CircularLongArray> me = it.next();
- CircularLongArray arr = me.getValue();
- // delete items updated older than 300s
- if (curTime - arr.lastWriteTimestamp > HISTORY_SIZE * samplingInterval * 1.0e3) {
- it.remove();
- }
- }
- }
- }
-
- // models a fixed-capacity queue that is append-only
- // not thread-safe
- static class CircularLongArray {
- ArrayList<Long> list = null;
- public long lastWriteTimestamp;
- private long totalValue;
- private int startidx;
- private int capacity;
- CircularLongArray(int capacity) {
- list = new ArrayList<>(capacity);
- this.capacity = capacity;
- totalValue = 0;
- startidx = 0;
- lastWriteTimestamp = 0;
- }
- public boolean add(long e) {
- lastWriteTimestamp = System.currentTimeMillis();
- if (list.size() < capacity) {
- // can only happen if startidx == 0
- if (startidx != 0) {
- return false;
- } else {
- totalValue += e;
- return list.add(e);
- }
- }
- totalValue -= list.get(startidx);
- totalValue += e;
- list.set(startidx, e);
- startidx = (startidx + 1) % capacity;
- return true;
- }
- public double getAvgValue() {
- return list.size() == 0 ? 0 : 1.0 * totalValue / list.size();
- }
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/AllMetrics.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/AllMetrics.java
deleted file mode 100644
index 876263b4..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/AllMetrics.java
+++ /dev/null
@@ -1,909 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics;
-
-/**
- * Contract between reader and writer. Writers write using the same values of
- * these enums as json keys (See all MetricStatus's subclasses in
- * com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors), while reader creates db tables using these
- * keys as column names and extract values using these keys. You should
- * make sure the field names in the MetricStatus's subclasses and enum
- * names match. Also, when you change anything, modify JsonKeyTest accordingly.
- * We use camelCase instead of the usual capital case for enum members because
- * they have better readability for the above use cases.
- *
- */
-public class AllMetrics {
- // metric names (not complete; only metrics that use the json format and contain
- // numeric values. Will add more when needed)
- public enum MetricName {
- CIRCUIT_BREAKER, HEAP_METRICS, DISK_METRICS, TCP_METRICS, IP_METRICS,
- THREAD_POOL, SHARD_STATS, MASTER_PENDING
- }
-
- // we don't store node details as a metric on reader side database. We
- // use the information as part of http response.
- public enum NodeDetailColumns {
- ID(Constants.ID_VALUE),
- HOST_ADDRESS(Constants.HOST_ADDRESS_VALUE);
-
- private final String value;
-
- NodeDetailColumns(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String ID_VALUE = "ID";
- public static final String HOST_ADDRESS_VALUE = "HOST_ADDRESS";
- }
- }
-
- // contents of metrics
- public enum GCType {
- TOT_YOUNG_GC(Constants.TOT_YOUNG_GC_VALUE),
- TOT_FULL_GC(Constants.TOT_FULL_GC_VALUE),
- SURVIVOR(Constants.SURVIVOR_VALUE),
- PERM_GEN(Constants.PERM_GEN_VALUE),
- OLD_GEN(Constants.OLD_GEN_VALUE),
- EDEN(Constants.EDEN_VALUE),
- NON_HEAP(Constants.NON_HEAP_VALUE),
- HEAP(Constants.HEAP_VALUE);
-
- private final String value;
-
- GCType(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String TOT_YOUNG_GC_VALUE = "totYoungGC";
- public static final String TOT_FULL_GC_VALUE = "totFullGC";
- public static final String SURVIVOR_VALUE = "Survivor";
- public static final String PERM_GEN_VALUE = "PermGen";
- public static final String OLD_GEN_VALUE = "OldGen";
- public static final String EDEN_VALUE = "Eden";
- public static final String NON_HEAP_VALUE = "NonHeap";
- public static final String HEAP_VALUE = "Heap";
- }
- }
-
- // column names of database table
- public enum CircuitBreakerDimension implements MetricDimension {
- CB_TYPE(Constants.TYPE_VALUE);
-
- private final String value;
-
- CircuitBreakerDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String TYPE_VALUE = "CBType";
- }
- }
-
- // cannot use "limit" as it is a keyword in SQL
- public enum CircuitBreakerValue implements MetricValue {
- CB_ESTIMATED_SIZE(Constants.ESTIMATED_VALUE),
- CB_TRIPPED_EVENTS(Constants.TRIPPED_VALUE),
- CB_CONFIGURED_SIZE(Constants.LIMIT_CONFIGURED_VALUE);
-
- private final String value;
-
- CircuitBreakerValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String ESTIMATED_VALUE = "CB_EstimatedSize";
-
- public static final String TRIPPED_VALUE = "CB_TrippedEvents";
-
- public static final String LIMIT_CONFIGURED_VALUE =
- "CB_ConfiguredSize";
- }
- }
-
- public enum HeapDimension implements MetricDimension {
- MEM_TYPE(Constants.TYPE_VALUE);
-
- private final String value;
-
- HeapDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String TYPE_VALUE = "MemType";
- }
- }
-
- public enum HeapValue implements MetricValue {
- GC_COLLECTION_EVENT(Constants.COLLECTION_COUNT_VALUE),
- GC_COLLECTION_TIME(Constants.COLLECTION_TIME_VALUE),
- HEAP_COMMITTED(Constants.COMMITTED_VALUE),
- HEAP_INIT(Constants.INIT_VALUE),
- HEAP_MAX(Constants.MAX_VALUE),
- HEAP_USED(Constants.USED_VALUE);
-
- private final String value;
-
- HeapValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String COLLECTION_COUNT_VALUE = "GC_Collection_Event";
-
- public static final String COLLECTION_TIME_VALUE = "GC_Collection_Time";
-
- public static final String COMMITTED_VALUE = "Heap_Committed";
-
- public static final String INIT_VALUE = "Heap_Init";
-
- public static final String MAX_VALUE = "Heap_Max";
-
- public static final String USED_VALUE = "Heap_Used";
- }
- }
-
- public enum DiskDimension implements MetricDimension {
- DISK_NAME(Constants.NAME_VALUE);
-
- private final String value;
-
- DiskDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String NAME_VALUE = "DiskName";
- }
- }
-
- public enum DiskValue implements MetricValue {
- DISK_UTILIZATION(Constants.UTIL_VALUE),
- DISK_WAITTIME(Constants.WAIT_VALUE),
- DISK_SERVICE_RATE(Constants.SRATE_VALUE);
-
- private final String value;
-
- DiskValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String UTIL_VALUE = "Disk_Utilization";
-
- public static final String WAIT_VALUE = "Disk_WaitTime";
-
- public static final String SRATE_VALUE = "Disk_ServiceRate";
- }
- }
-
- public enum TCPDimension implements MetricDimension {
- DEST_ADDR(Constants.DEST_VALUE);
-
- private final String value;
-
- TCPDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String DEST_VALUE = "DestAddr";
- }
- }
-
- public enum TCPValue implements MetricValue {
- Net_TCP_NUM_FLOWS(Constants.NUM_FLOWS_VALUE),
- Net_TCP_TXQ(Constants.TXQ_VALUE),
- Net_TCP_RXQ(Constants.RXQ_VALUE),
- Net_TCP_LOST(Constants.CUR_LOST_VALUE),
- Net_TCP_SEND_CWND(Constants.SEND_CWND_VALUE),
- Net_TCP_SSTHRESH(Constants.SSTHRESH_VALUE);
-
- private final String value;
-
- TCPValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String NUM_FLOWS_VALUE = "Net_TCP_NumFlows";
-
- public static final String TXQ_VALUE = "Net_TCP_TxQ";
-
- public static final String RXQ_VALUE = "Net_TCP_RxQ";
-
- public static final String CUR_LOST_VALUE = "Net_TCP_Lost";
-
- public static final String SEND_CWND_VALUE = "Net_TCP_SendCWND";
-
- public static final String SSTHRESH_VALUE = "Net_TCP_SSThresh";
- }
- }
-
- public enum IPDimension implements MetricDimension {
- DIRECTION(Constants.DIRECTION_VALUE);
-
- private final String value;
-
- IPDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String DIRECTION_VALUE = "Direction";
- }
- }
-
- public enum IPValue implements MetricValue {
- NET_PACKET_RATE4(Constants.PACKET_RATE4_VALUE),
- NET_PACKET_DROP_RATE4(Constants.DROP_RATE4_VALUE),
- NET_PACKET_RATE6(Constants.PACKET_RATE6_VALUE),
- NET_PACKET_DROP_RATE6(Constants.DROP_RATE6_VALUE),
- NET_THROUGHPUT(Constants.THROUGHPUT_VALUE);
-
- private final String value;
-
- IPValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String PACKET_RATE4_VALUE = "Net_PacketRate4";
- public static final String DROP_RATE4_VALUE = "Net_PacketDropRate4";
- public static final String PACKET_RATE6_VALUE = "Net_PacketRate6";
- public static final String DROP_RATE6_VALUE = "Net_PacketDropRate6";
- public static final String THROUGHPUT_VALUE = "Net_Throughput";
- }
- }
-
- public enum ThreadPoolDimension implements MetricDimension {
- THREAD_POOL_TYPE(Constants.TYPE_VALUE);
-
- private final String value;
-
- ThreadPoolDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String TYPE_VALUE = "ThreadPoolType";
- }
- }
-
- public enum ThreadPoolValue implements MetricValue {
- THREADPOOL_QUEUE_SIZE(Constants.QUEUE_SIZE_VALUE),
- THREADPOOL_REJECTED_REQS(Constants.REJECTED_VALUE),
- THREADPOOL_TOTAL_THREADS(Constants.THREADS_COUNT_VALUE),
- THREADPOOL_ACTIVE_THREADS(Constants.THREADS_ACTIVE_VALUE);
-
- private final String value;
-
- ThreadPoolValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String QUEUE_SIZE_VALUE = "ThreadPool_QueueSize";
- public static final String REJECTED_VALUE = "ThreadPool_RejectedReqs";
- public static final String THREADS_COUNT_VALUE = "ThreadPool_TotalThreads";
- public static final String THREADS_ACTIVE_VALUE = "ThreadPool_ActiveThreads";
- }
- }
-
- // extra dimension values come from other places (e.g., file path) instead
- // of metric files themselves
- public enum ShardStatsDerivedDimension implements MetricDimension {
- INDEX_NAME(Constants.INDEX_NAME_VALUE),
- SHARD_ID(Constants.SHARD_ID_VALUE);
-
- private final String value;
-
- ShardStatsDerivedDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String INDEX_NAME_VALUE =
- CommonDimension.INDEX_NAME.toString();
-
- public static final String SHARD_ID_VALUE =
- CommonDimension.SHARD_ID.toString();
- }
- }
-
- public enum ShardStatsValue implements MetricValue {
- INDEXING_THROTTLE_TIME(Constants.INDEXING_THROTTLE_TIME_VALUE),
- CACHE_QUERY_HIT(Constants.QUEY_CACHE_HIT_COUNT_VALUE),
- CACHE_QUERY_MISS(Constants.QUERY_CACHE_MISS_COUNT_VALUE),
- CACHE_QUERY_SIZE(Constants.QUERY_CACHE_IN_BYTES_VALUE),
- CACHE_FIELDDATA_EVICTION(Constants.FIELDDATA_EVICTION_VALUE),
- CACHE_FIELDDATA_SIZE(Constants.FIELD_DATA_IN_BYTES_VALUE),
- CACHE_REQUEST_HIT(Constants.REQUEST_CACHE_HIT_COUNT_VALUE),
- CACHE_REQUEST_MISS(Constants.REQUEST_CACHE_MISS_COUNT_VALUE),
- CACHE_REQUEST_EVICTION(Constants.REQUEST_CACHE_EVICTION_VALUE),
- CACHE_REQUEST_SIZE(Constants.REQUEST_CACHE_IN_BYTES_VALUE),
- REFRESH_EVENT(Constants.REFRESH_COUNT_VALUE),
- REFRESH_TIME(Constants.REFRESH_TIME_VALUE),
- FLUSH_EVENT(Constants.FLUSH_COUNT_VALUE),
- FLUSH_TIME(Constants.FLUSH_TIME_VALUE),
- MERGE_EVENT(Constants.MERGE_COUNT_VALUE),
- MERGE_TIME(Constants.MERGE_TIME_VALUE),
- MERGE_CURRENT_EVENT(Constants.MERGE_CURRENT_VALUE),
- INDEXING_BUFFER(Constants.INDEX_BUFFER_BYTES_VALUE),
- SEGMENTS_TOTAL(Constants.SEGMENTS_COUNT_VALUE),
- SEGMENTS_MEMORY(Constants.SEGMENTS_MEMORY_VALUE),
- TERMS_MEMORY(Constants.TERMS_MEMORY_VALUE),
- STORED_FIELDS_MEMORY(Constants.STORED_FIELDS_MEMORY_VALUE),
- TERM_VECTOR_MEMORY(Constants.TERM_VECTOR_MEMORY_VALUE),
- NORMS_MEMORY(Constants.NORMS_MEMORY_VALUE),
- POINTS_MEMORY(Constants.POINTS_MEMORY_VALUE),
- DOC_VALUES_MEMORY(Constants.DOC_VALUES_MEMORY_VALUE),
- INDEX_WRITER_MEMORY(Constants.INDEX_WRITER_MEMORY_VALUE),
- VERSION_MAP_MEMORY(Constants.VERSION_MAP_MEMORY_VALUE),
- BITSET_MEMORY(Constants.BITSET_MEMORY_VALUE);
-
- private final String value;
-
- ShardStatsValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String INDEXING_THROTTLE_TIME_VALUE = "Indexing_ThrottleTime";
-
- public static final String QUEY_CACHE_HIT_COUNT_VALUE = "Cache_Query_Hit";
-
- public static final String QUERY_CACHE_MISS_COUNT_VALUE = "Cache_Query_Miss";
-
- public static final String QUERY_CACHE_IN_BYTES_VALUE = "Cache_Query_Size";
-
- public static final String FIELDDATA_EVICTION_VALUE = "Cache_FieldData_Eviction";
-
- public static final String FIELD_DATA_IN_BYTES_VALUE = "Cache_FieldData_Size";
-
- public static final String REQUEST_CACHE_HIT_COUNT_VALUE = "Cache_Request_Hit";
-
- public static final String REQUEST_CACHE_MISS_COUNT_VALUE = "Cache_Request_Miss";
-
- public static final String REQUEST_CACHE_EVICTION_VALUE = "Cache_Request_Eviction";
-
- public static final String REQUEST_CACHE_IN_BYTES_VALUE = "Cache_Request_Size";
-
- public static final String REFRESH_COUNT_VALUE = "Refresh_Event";
-
- public static final String REFRESH_TIME_VALUE = "Refresh_Time";
-
- public static final String FLUSH_COUNT_VALUE = "Flush_Event";
-
- public static final String FLUSH_TIME_VALUE = "Flush_Time";
-
- public static final String MERGE_COUNT_VALUE = "Merge_Event";
-
- public static final String MERGE_TIME_VALUE = "Merge_Time";
-
- public static final String MERGE_CURRENT_VALUE = "Merge_CurrentEvent";
-
- public static final String INDEX_BUFFER_BYTES_VALUE = "Indexing_Buffer";
-
- public static final String SEGMENTS_COUNT_VALUE = "Segments_Total";
-
- public static final String SEGMENTS_MEMORY_VALUE = "Segments_Memory";
-
- public static final String TERMS_MEMORY_VALUE = "Terms_Memory";
-
- public static final String STORED_FIELDS_MEMORY_VALUE = "StoredFields_Memory";
-
- public static final String TERM_VECTOR_MEMORY_VALUE = "TermVectors_Memory";
-
- public static final String NORMS_MEMORY_VALUE = "Norms_Memory";
-
- public static final String POINTS_MEMORY_VALUE = "Points_Memory";
-
- public static final String DOC_VALUES_MEMORY_VALUE = "DocValues_Memory";
-
- public static final String INDEX_WRITER_MEMORY_VALUE = "IndexWriter_Memory";
-
- public static final String VERSION_MAP_MEMORY_VALUE = "VersionMap_Memory";
-
- public static final String BITSET_MEMORY_VALUE = "Bitset_Memory";
- }
- }
-
- public enum MasterPendingValue implements MetricValue {
- MASTER_PENDING_QUEUE_SIZE(Constants.PENDING_TASKS_COUNT_VALUE);
-
- private final String value;
-
- MasterPendingValue(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String PENDING_TASKS_COUNT_VALUE = "Master_PendingQueueSize";
- }
- }
-
-
- public enum OSMetrics {
- CPU_UTILIZATION(Constants.CPU_VALUE),
- PAGING_MAJ_FLT_RATE(Constants.PAGING_MAJFLT_VALUE),
- PAGING_MIN_FLT_RATE(Constants.PAGING_MINFLT_VALUE),
- PAGING_RSS(Constants.RSS_VALUE),
- SCHED_RUNTIME(Constants.RUNTIME_VALUE),
- SCHED_WAITTIME(Constants.WAITTIME_VALUE),
- SCHED_CTX_RATE(Constants.CTXRATE_VALUE),
- HEAP_ALLOC_RATE(Constants.HEAP_ALLOC_VALUE),
- IO_READ_THROUGHPUT(Constants.READ_THROUGHPUT_VALUE),
- IO_WRITE_THROUGHPUT(Constants.WRITE_THROUGHPUT_VALUE),
- IO_TOT_THROUGHPUT(Constants.TOTAL_THROUGHPUT_VALUE),
- IO_READ_SYSCALL_RATE(Constants.READ_SYSCALL_RATE_VALUE),
- IO_WRITE_SYSCALL_RATE(Constants.WRITE_SYSCALL_RATE_VALUE),
- IO_TOTAL_SYSCALL_RATE(Constants.TOTAL_SYSCALL_RATE_VALUE),
- THREAD_BLOCKED_TIME(Constants.BLOCKED_TIME_VALUE),
- THREAD_BLOCKED_EVENT(Constants.BLOCKED_COUNT_VALUE);
-
- private final String value;
-
- OSMetrics(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String CPU_VALUE = "CPU_Utilization";
- public static final String PAGING_MAJFLT_VALUE = "Paging_MajfltRate";
- public static final String PAGING_MINFLT_VALUE = "Paging_MinfltRate";
- public static final String RSS_VALUE = "Paging_RSS";
- public static final String RUNTIME_VALUE = "Sched_Runtime";
- public static final String WAITTIME_VALUE = "Sched_Waittime";
- public static final String CTXRATE_VALUE = "Sched_CtxRate";
- public static final String HEAP_ALLOC_VALUE = "Heap_AllocRate";
- public static final String READ_THROUGHPUT_VALUE = "IO_ReadThroughput";
- public static final String WRITE_THROUGHPUT_VALUE = "IO_WriteThroughput";
- public static final String TOTAL_THROUGHPUT_VALUE = "IO_TotThroughput";
- public static final String READ_SYSCALL_RATE_VALUE = "IO_ReadSyscallRate";
- public static final String WRITE_SYSCALL_RATE_VALUE = "IO_WriteSyscallRate";
- public static final String TOTAL_SYSCALL_RATE_VALUE = "IO_TotalSyscallRate";
- public static final String BLOCKED_TIME_VALUE = "Thread_Blocked_Time";
- public static final String BLOCKED_COUNT_VALUE = "Thread_Blocked_Event";
- }
- }
-
- public enum MasterMetricDimensions implements MetricDimension {
- MASTER_TASK_PRIORITY("MasterTaskPriority"),
- MASTER_TASK_TYPE("MasterTaskType"),
- MASTER_TASK_METADATA("MasterTaskMetadata"),
- MASTER_TASK_QUEUE_TIME("MasterTaskQueueTime"),
- MASTER_TASK_RUN_TIME("MasterTaskRunTime"),
- MASTER_TASK_INSERT_ORDER("MasterTaskInsertOrder");
-
- private final String value;
-
- MasterMetricDimensions(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
- }
-
- public enum MasterMetricValues implements MetricValue {
- //-todo : Migrate to CommonMetric.Constants
- MASTER_TASK_QUEUE_TIME("Master_Task_Queue_Time"),
- MASTER_TASK_RUN_TIME("Master_Task_Run_Time"),
- START_TIME("StartTime"),
- FINISH_TIME("FinishTime");
-
- private final String value;
-
- MasterMetricValues(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
- }
-
- public enum HttpDimension implements MetricDimension {
- EXCEPTION(Constants.EXCEPTION_VALUE),
- HTTP_RESP_CODE(Constants.HTTP_RESP_CODE_VALUE),
- INDICES(Constants.INDICES_VALUE);
-
- private final String value;
-
- HttpDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String INDICES_VALUE = "Indices";
- public static final String EXCEPTION_VALUE = CommonDimension.EXCEPTION.toString();
- public static final String HTTP_RESP_CODE_VALUE = "HTTPRespCode";
- }
- }
-
- public enum HttpMetric implements MetricValue {
- START_TIME(Constants.START_TIME_VALUE),
- HTTP_REQUEST_DOCS(Constants.HTTP_REQUEST_DOCS_VALUE),
- FINISH_TIME(Constants.FINISH_TIME_VALUE),
- HTTP_TOTAL_REQUESTS(Constants.HTTP_TOTAL_REQUESTS_VALUE);
-
- private final String value;
-
- HttpMetric(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String START_TIME_VALUE = CommonMetric.START_TIME.toString();
- public static final String FINISH_TIME_VALUE = CommonMetric.FINISH_TIME.toString();
- public static final String HTTP_REQUEST_DOCS_VALUE = "HTTP_RequestDocs";
- public static final String HTTP_TOTAL_REQUESTS_VALUE = "HTTP_TotalRequests";
-
- }
- }
-
- public enum ShardBulkDimension implements MetricDimension {
- INDEX_NAME(Constants.INDEXNAME_VALUE),
- SHARD_ID(Constants.SHARDID_VALUE),
- PRIMARY(Constants.PRIMARY_VALUE),
- EXCEPTION(Constants.EXCEPTION_VALUE),
- FAILED(Constants.FAILED_VALUE);
-
- private final String value;
-
- ShardBulkDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String INDEXNAME_VALUE = CommonDimension.INDEX_NAME.toString();
- public static final String SHARDID_VALUE = CommonDimension.SHARD_ID.toString();
- public static final String PRIMARY_VALUE = "Primary";
- public static final String EXCEPTION_VALUE = CommonDimension.EXCEPTION.toString();
- public static final String FAILED_VALUE = CommonDimension.FAILED.toString();
- }
- }
-
- public enum ShardBulkMetric implements MetricValue {
- START_TIME(Constants.START_TIME_VALUE),
- ITEM_COUNT(Constants.ITEM_COUNT_VALUE),
- FINISH_TIME(Constants.FINISH_TIME_VALUE),
- LATENCY(Constants.LATENCY_VALUE),
- DOC_COUNT(Constants.DOC_COUNT);
-
- private final String value;
-
- ShardBulkMetric(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String START_TIME_VALUE = CommonMetric.START_TIME.toString();
- public static final String ITEM_COUNT_VALUE = "ItemCount";
- public static final String FINISH_TIME_VALUE = CommonMetric.FINISH_TIME.toString();
- public static final String LATENCY_VALUE = CommonMetric.LATENCY.toString();
- public static final String DOC_COUNT = "ShardBulkDocs";
- }
- }
-
- public enum ShardOperationMetric implements MetricValue {
- SHARD_OP_COUNT(Constants.SHARD_OP_COUNT_VALUE);
-
- private final String value;
-
- ShardOperationMetric(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String SHARD_OP_COUNT_VALUE = "ShardEvents";
- }
- }
-
- public enum CommonDimension implements MetricDimension {
- INDEX_NAME(Constants.INDEX_NAME_VALUE),
- OPERATION(Constants.OPERATION_VALUE),
- SHARD_ROLE(Constants.SHARD_ROLE_VALUE),
- SHARD_ID(Constants.SHARDID_VALUE),
- EXCEPTION(Constants.EXCEPTION_VALUE),
- FAILED(Constants.FAILED_VALUE);
-
- private final String value;
-
- CommonDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String INDEX_NAME_VALUE = "IndexName";
- public static final String SHARDID_VALUE = "ShardID";
- public static final String OPERATION_VALUE = "Operation";
- public static final String SHARD_ROLE_VALUE = "ShardRole";
- public static final String EXCEPTION_VALUE = "Exception";
- public static final String FAILED_VALUE = "Failed";
- }
- }
-
- public enum CommonMetric {
- START_TIME(Constants.START_TIME_VALUE),
- FINISH_TIME(Constants.FINISH_TIME_VALUE),
- LATENCY(Constants.LATENCY_VALUE);
-
- private final String value;
-
- CommonMetric(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String START_TIME_VALUE = "StartTime";
- public static final String FINISH_TIME_VALUE = "FinishTime";
- public static final String LATENCY_VALUE = "Latency";
- }
- }
-
- public enum EmptyDimension implements MetricDimension {
- EMPTY("");
-
- private final String value;
-
- EmptyDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
- }
-
- public enum AggregatedOSDimension implements MetricDimension {
- INDEX_NAME(CommonDimension.INDEX_NAME.toString()),
- OPERATION(CommonDimension.OPERATION.toString()),
- SHARD_ROLE(CommonDimension.SHARD_ROLE.toString()),
- SHARD_ID(CommonDimension.SHARD_ID.toString());
-
- private final String value;
-
- AggregatedOSDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
- }
-
- public enum LatencyDimension implements MetricDimension {
- OPERATION(CommonDimension.OPERATION.toString()),
- EXCEPTION(CommonDimension.EXCEPTION.toString()),
- INDICES(HttpDimension.INDICES.toString()),
- HTTP_RESP_CODE(HttpDimension.HTTP_RESP_CODE.toString()),
- SHARD_ID(CommonDimension.SHARD_ID.toString()),
- INDEX_NAME(CommonDimension.INDEX_NAME.toString()),
- SHARD_ROLE(CommonDimension.SHARD_ROLE.toString());
-
- private final String value;
-
- LatencyDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
- }
-
- public enum HttpOnlyDimension implements MetricDimension {
- OPERATION(CommonDimension.OPERATION.toString()),
- EXCEPTION(CommonDimension.EXCEPTION.toString()),
- INDICES(HttpDimension.INDICES.toString()),
- HTTP_RESP_CODE(HttpDimension.HTTP_RESP_CODE.toString());
-
- private final String value;
-
- HttpOnlyDimension(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
- }
-
- public enum MetricUnits {
- CORES(Constants.CORES_VALUE),
- COUNT_PER_SEC(Constants.COUNT_PER_SEC_VALUE),
- COUNT(Constants.COUNT_VALUE),
- PAGES(Constants.PAGES_VALUE),
- SEC_PER_CONTEXT_SWITCH(Constants.SEC_PER_CONTEXT_SWITCH_VALUE),
- BYTE_PER_SEC(Constants.BYTE_PER_SEC_VALUE),
- SEC_PER_EVENT(Constants.SEC_PER_EVENT_VALUE),
- MILLISECOND(Constants.MILLISECOND_VALUE),
- BYTE(Constants.BYTE_VALUE),
- PERCENT(Constants.PERCENT_VALUE),
- MEGABYTE_PER_SEC(Constants.MEGABYTE_PER_SEC_VALUE),
- SEGMENT_PER_FLOW(Constants.SEGMENT_PER_FLOW_VALUE),
- BYTE_PER_FLOW(Constants.BYTE_PER_FLOW_VALUE),
- PACKET_PER_SEC(Constants.PACKET_PER_SEC_VALUE);
-
- private final String value;
-
- MetricUnits(String value) {
- this.value = value;
- }
-
- @Override
- public String toString() {
- return value;
- }
-
- public static class Constants {
- public static final String CORES_VALUE = "cores";
- public static final String COUNT_PER_SEC_VALUE = "count/s";
- public static final String COUNT_VALUE = "count";
- public static final String PAGES_VALUE = "pages";
- public static final String SEC_PER_CONTEXT_SWITCH_VALUE = "s/ctxswitch";
- public static final String BYTE_PER_SEC_VALUE = "B/s";
- public static final String SEC_PER_EVENT_VALUE = "s/event";
- public static final String MILLISECOND_VALUE = "ms";
- public static final String BYTE_VALUE = "B";
- public static final String PERCENT_VALUE = "%";
- public static final String MEGABYTE_PER_SEC_VALUE = "MB/s";
- public static final String SEGMENT_PER_FLOW_VALUE = "segments/flow";
- public static final String BYTE_PER_FLOW_VALUE = "B/flow";
- public static final String PACKET_PER_SEC_VALUE = "packets/s";
-
- }
- }
-}
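
Aside on the contract described in the AllMetrics Javadoc above (a hypothetical, standalone sketch, not part of this change): the enum constant's backing string is the single source of truth, serving both as the writer's JSON key and as the reader's column name. The "Heap_Used" literal below mirrors HeapValue.HEAP_USED.

import java.util.Locale;

public class MetricKeyContractSketch {
    // Mirrors AllMetrics.HeapValue.HEAP_USED's backing string ("Heap_Used").
    private static final String HEAP_USED_KEY = "Heap_Used";

    public static void main(String[] args) {
        long heapUsedBytes = Runtime.getRuntime().totalMemory()
                - Runtime.getRuntime().freeMemory();

        // Writer side: the key appears verbatim in the emitted JSON fragment.
        String writerJson = String.format(Locale.ROOT, "{\"%s\":%d}",
                HEAP_USED_KEY, heapUsedBytes);

        // Reader side: the same string names the database column, so a rename
        // only ever has to happen in one place (the enum).
        String readerColumn = HEAP_USED_KEY;

        System.out.println(writerJson + " -> column " + readerColumn);
    }
}
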
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricDimension.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricDimension.java
deleted file mode 100644
index 19aaeb47..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricDimension.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics;
-
-/**
- * This helps in writing a general parser. Given a MetricDimension, the parser can
- * read a metric file using the values provided by the MetricDimension enum without
- * hardcoding the exact enum name. The parser only needs to
- * know that the enum holds a metric's dimensions and can use its members as JSON keys to
- * parse out the concrete metric dimensions. See
- * src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MetricProperties.java
- *
- *
- */
-public interface MetricDimension {
-
-}
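
To make the "general parser" idea in the Javadoc above concrete, here is a hypothetical, standalone sketch (the interface and enum names are illustrative, not the plugin's own): the parser walks an enum's members via getEnumConstants() and uses each member's toString() as the JSON key, so no specific dimension enum is ever hardcoded.

import java.util.LinkedHashMap;
import java.util.Map;

public class GenericDimensionParserSketch {
    // Stand-in for the MetricDimension marker interface.
    interface Dimension { }

    // Stand-in dimension enum; toString() defaults to the constant name.
    enum DiskDim implements Dimension { DISK_NAME }

    // Pull every dimension the enum declares out of an already-parsed JSON map.
    static <E extends Enum<E> & Dimension> Map<String, String> extract(
            Class<E> dims, Map<String, String> parsedJson) {
        Map<String, String> out = new LinkedHashMap<>();
        for (E d : dims.getEnumConstants()) {
            out.put(d.toString(), parsedJson.get(d.toString()));
        }
        return out;
    }

    public static void main(String[] args) {
        Map<String, String> parsed = Map.of("DISK_NAME", "sda1");
        System.out.println(extract(DiskDim.class, parsed)); // {DISK_NAME=sda1}
    }
}
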
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricValue.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricValue.java
deleted file mode 100644
index 151dcd2c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricValue.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics;
-
-/**
- * This helps in writing a general parser. Given a MetricValue, the parser can read a
- * metric file using the values provided by the MetricValue enum without needing
- * to hardcode the exact enum name. The parser only needs to know
- * that the enum holds a metric's values and can use its members as JSON keys to parse out
- * the concrete metric values. See
- * src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MetricProperties.java
- *
- *
- */
-public interface MetricValue {
-
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricsConfiguration.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricsConfiguration.java
deleted file mode 100644
index 1116b8bf..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricsConfiguration.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.CircuitBreakerCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.DisksCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.HeapMetricsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MasterServiceEventMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MasterServiceMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.MetricsPurgeActivity;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NetworkE2ECollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NetworkInterfaceCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NodeDetailsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NodeStatsMetricsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.ThreadPoolMetricsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm.GCMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm.HeapMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm.ThreadList;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.OSGlobals;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadCPU;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadDiskIO;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadSched;
-
-public class MetricsConfiguration {
- public static final int SAMPLING_INTERVAL = 5000;
- public static final int ROTATION_INTERVAL = 30000;
- public static final int STATS_ROTATION_INTERVAL = 60000;
- public static final int DELETION_INTERVAL = PluginSettings.instance().getMetricsDeletionInterval();
-
- public static class MetricConfig {
- public int samplingInterval;
- public int rotationInterval;
- public int deletionInterval;
-
- MetricConfig(int samplingInterval,
- int rotationInterval,
- int deletionInterval) {
- this.samplingInterval = samplingInterval;
- this.rotationInterval = rotationInterval;
- this.deletionInterval = deletionInterval;
- }
- }
-
- public static final Map<Class, MetricConfig> CONFIG_MAP = new HashMap<>();
-
- static {
- MetricConfig cdefault = new MetricConfig(SAMPLING_INTERVAL, 0, 0);
-
- CONFIG_MAP.put(ThreadCPU.class, cdefault);
- CONFIG_MAP.put(ThreadDiskIO.class, cdefault);
- CONFIG_MAP.put(ThreadSched.class, cdefault);
- CONFIG_MAP.put(ThreadList.class, cdefault);
- CONFIG_MAP.put(GCMetrics.class, cdefault);
- CONFIG_MAP.put(HeapMetrics.class, cdefault);
- CONFIG_MAP.put(NetworkE2ECollector.class, cdefault);
- CONFIG_MAP.put(NetworkInterfaceCollector.class, cdefault);
- CONFIG_MAP.put(OSGlobals.class, cdefault);
- CONFIG_MAP.put(PerformanceAnalyzerMetrics.class, new MetricConfig(0, ROTATION_INTERVAL, 0));
- CONFIG_MAP.put(MetricsPurgeActivity.class, new MetricConfig(ROTATION_INTERVAL, 0, DELETION_INTERVAL));
- CONFIG_MAP.put(StatsCollector.class, new MetricConfig(STATS_ROTATION_INTERVAL, 0, 0));
- CONFIG_MAP.put(MasterServiceEventMetrics.class, new MetricConfig(1000, 0, 0));
- CONFIG_MAP.put(MasterServiceMetrics.class, cdefault);
- CONFIG_MAP.put(DisksCollector.class, cdefault);
- CONFIG_MAP.put(CircuitBreakerCollector.class, cdefault);
- CONFIG_MAP.put(HeapMetricsCollector.class, cdefault);
- CONFIG_MAP.put(NodeDetailsCollector.class, cdefault);
- CONFIG_MAP.put(NodeStatsMetricsCollector.class, cdefault);
- CONFIG_MAP.put(ThreadPoolMetricsCollector.class, cdefault);
- }
-}
-
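
As a rough, standalone illustration of the CONFIG_MAP pattern above (hypothetical names, not the plugin's own classes): each collector class is keyed to a MetricConfig, and a scheduler looks up that collector's intervals before each run.

import java.util.HashMap;
import java.util.Map;

public class MetricConfigLookupSketch {
    static class MetricConfig {
        final int samplingIntervalMs;
        final int rotationIntervalMs;
        final int deletionIntervalMs;

        MetricConfig(int samplingIntervalMs, int rotationIntervalMs, int deletionIntervalMs) {
            this.samplingIntervalMs = samplingIntervalMs;
            this.rotationIntervalMs = rotationIntervalMs;
            this.deletionIntervalMs = deletionIntervalMs;
        }
    }

    // Stand-in for a collector class used purely as a map key.
    static class HeapCollector { }

    public static void main(String[] args) {
        Map<Class<?>, MetricConfig> configMap = new HashMap<>();
        configMap.put(HeapCollector.class, new MetricConfig(5000, 0, 0));

        // A scheduler would consult the collector's own config before each run.
        MetricConfig cfg = configMap.get(HeapCollector.class);
        System.out.println("sample HeapCollector every " + cfg.samplingIntervalMs + " ms");
    }
}
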
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricsProcessor.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricsProcessor.java
deleted file mode 100644
index b4d9b462..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/MetricsProcessor.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerPlugin;
-
-public interface MetricsProcessor {
-
- default String getMetricValues(long startTime, String... keysPath) {
- return PerformanceAnalyzerMetrics.getMetric(getMetricsPath(startTime, keysPath));
- }
-
- default void saveMetricValues(String value, long startTime, String... keysPath) {
- PerformanceAnalyzerPlugin.invokePrivileged(() -> PerformanceAnalyzerMetrics.emitMetric(getMetricsPath(startTime, keysPath), value));
- }
-
- default String getMetricValue(String metricName, long startTime, String... keys) {
- return PerformanceAnalyzerMetrics.extractMetricValue(getMetricValues(startTime, keys), metricName);
- }
-
- String getMetricsPath(long startTime, String... keysPath);
-}
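
A hypothetical, trimmed-down sketch of how a collector plugs into the MetricsProcessor contract above: the collector only supplies getMetricsPath(), and the default methods read and write at that path (the interface and path join below are stand-ins, not the plugin's real classes).

public class MetricsProcessorSketch {
    // Trimmed-down stand-in for the MetricsProcessor interface.
    interface MetricsProcessor {
        String getMetricsPath(long startTime, String... keysPath);
    }

    // A collector only decides where its metrics live; reads and writes reuse that path.
    static class HeapCollectorSketch implements MetricsProcessor {
        @Override
        public String getMetricsPath(long startTime, String... keysPath) {
            // Real collectors delegate to PerformanceAnalyzerMetrics.generatePath;
            // here the segments are simply joined for illustration.
            return startTime + "/" + String.join("/", keysPath);
        }
    }

    public static void main(String[] args) {
        MetricsProcessor collector = new HeapCollectorSketch();
        System.out.println(collector.getMetricsPath(1_560_000_000_000L, "heap_metrics"));
    }
}
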
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/PerformanceAnalyzerMetrics.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/PerformanceAnalyzerMetrics.java
deleted file mode 100644
index fb13ab5c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/PerformanceAnalyzerMetrics.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-
-@SuppressWarnings("checkstyle:constantname")
-public class PerformanceAnalyzerMetrics {
- private static final Logger LOG = LogManager.getLogger(PerformanceAnalyzerMetrics.class);
- public static final String sDevShmLocation = PluginSettings.instance().getMetricsLocation();
- public static final String sDevShmScratchLocation = "performanceanalyzer_scratch";
- public static final String sIndicesPath = "indices";
- public static final String sThreadPoolPath = "thread_pool";
- public static final String sThreadsPath = "threads";
- public static final String sCircuitBreakerPath = "circuit_breaker";
- public static final String sShardBulkPath = "shardbulk";
- public static final String sShardFetchPath = "shardfetch";
- public static final String sShardQueryPath = "shardquery";
- public static final String sMasterTaskPath = "master_task";
- public static final String sHttpPath = "http";
- public static final String sOSPath = "os_metrics";
- public static final String sHeapPath = "heap_metrics";
- public static final String sNodesPath = "node_metrics";
- public static final String sPendingTasksPath = "pending_tasks";
- public static final String sDisksPath = "disk_metrics";
- public static final String sTCPPath = "tcp_metrics";
- public static final String sIPPath = "ip_metrics";
- public static final String sKeyValueDelimitor = ":";
- public static final String sMetricNewLineDelimitor = System.getProperty("line.separator");
- public static final String START_FILE_NAME = "start";
- public static final String FINISH_FILE_NAME = "finish";
- public static final String MASTER_CURRENT = "current";
- public static final String MASTER_META_DATA = "metadata";
- public static final String METRIC_CURRENT_TIME = "current_time";
-
- private static final int NUM_RETRIES_FOR_TMP_FILE = 10;
-
- private static final boolean IS_METRICS_LOG_ENABLED =
- System.getProperty("performanceanalyzer.metrics.log.enabled", "False").equalsIgnoreCase("True");
-
- private static final int sTimeInterval =
- MetricsConfiguration.CONFIG_MAP.get(PerformanceAnalyzerMetrics.class).rotationInterval;
-
- public static long getTimeInterval(long startTime) {
- return getTimeInterval(startTime, sTimeInterval);
- }
-
- public static long getTimeInterval(long startTime, int timeInterval) {
- return (startTime / timeInterval) * timeInterval;
- }
-
- public static String getCurrentTimeMetric() {
- return METRIC_CURRENT_TIME + sKeyValueDelimitor + System.currentTimeMillis();
- }
-
- public static String generatePath(long startTime, String... keysPath) {
- Path sDevShmLocationPath = Paths.get(sDevShmLocation)
- .resolve(Paths.get(String.valueOf(
- PerformanceAnalyzerMetrics.getTimeInterval(startTime)), keysPath));
- return sDevShmLocationPath.toString();
- }
-
- public static void addMetricEntry(StringBuilder value, String metricKey, String metricValue) {
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor).append(metricKey)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor).append(metricValue);
- }
-
- public static void addMetricEntry(StringBuilder value, String metricKey, long metricValue) {
- value.append(PerformanceAnalyzerMetrics.sMetricNewLineDelimitor).append(metricKey)
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor).append(metricValue);
- }
-
- public static void emitMetric(String keyPath, String value) {
- File file = new File(keyPath);
- if (IS_METRICS_LOG_ENABLED) {
- LOG.info(keyPath + "\n" + value);
- }
-
- try {
- java.nio.file.Files.createDirectories(file.getParentFile().toPath());
- } catch (IOException ex) {
- LOG.debug(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error In Creating Directories: {} for keyPath:{} with ExceptionCode: {}",
- ex.toString(), keyPath, StatExceptionCode.METRICS_WRITE_ERROR.toString()),
- ex);
- StatsCollector.instance().logException(StatExceptionCode.METRICS_WRITE_ERROR);
- return;
- }
-
- File tmpFile = null;
- try {
- tmpFile = writeToTmp(keyPath, value);
- } catch (Exception ex) {
- LOG.debug(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error in Writing to Tmp File: {} for keyPath:{} with ExceptionCode: {}",
- ex.toString(), keyPath, StatExceptionCode.METRICS_WRITE_ERROR.toString()),
- ex);
- StatsCollector.instance().logException(StatExceptionCode.METRICS_WRITE_ERROR);
- return;
- }
-
- try {
- tmpFile.renameTo(file);
- } catch (Exception ex) {
- LOG.debug(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error in Renaming Tmp File: {} for keyPath:{} with ExceptionCode: {}",
- ex.toString(), keyPath, StatExceptionCode.METRICS_WRITE_ERROR.toString()),
- ex);
- StatsCollector.instance().logException(StatExceptionCode.METRICS_WRITE_ERROR);
- }
- }
-
- private static File writeToTmp(String keyPath, String value) throws Exception {
- int numRetries = 0;
-
- //- try up to 10 times to avoid hash code collisions
- while (numRetries < NUM_RETRIES_FOR_TMP_FILE) {
- //- creating a tmp file under: /dev/shm/performanceanalyzer//performanceanalyzer_scratch/
- //- In case the rename fails, we don't need to delete this; auto purge happens when the TIMESTAMP bucket is purged
- //- To avoid collisions, the temp file name is chosen as:
- //- hashcode of (absolute metric file path + value + current time in nanoseconds)
- StringBuilder tmp = new StringBuilder().append(keyPath).append(value).append(String.valueOf(System.nanoTime()));
- File file = new File(PerformanceAnalyzerMetrics.generatePath(System.currentTimeMillis(), sDevShmScratchLocation,
- String.valueOf(tmp.toString().hashCode())));
- java.nio.file.Files.createDirectories(file.getParentFile().toPath());
- if (file.createNewFile()) {
- try (FileOutputStream fos = new FileOutputStream(file);) {
- fos.write(value.getBytes());
- }
- return file;
- }
- numRetries++;
- }
- throw new Exception("Unable to create tmp file after " + NUM_RETRIES_FOR_TMP_FILE + " retries");
- }
-
- public static String getMetric(long startTime, String... keysPath) {
- return getMetric(generatePath(startTime, keysPath));
- }
-
- public static String getMetric(String keyPath) {
- try {
- return new String(Files.readAllBytes(Paths.get(keyPath)));
- } catch (Exception ex) {
- //-todo logging
-// ex.printStackTrace();
- return "";
- }
- }
-
- public static String extractMetricValue(String metricVal, String key) {
- int startIndex = metricVal.indexOf(key);
-
- if (startIndex != -1) {
- startIndex = metricVal.indexOf(sKeyValueDelimitor, startIndex);
- int endIndex = metricVal.indexOf(sMetricNewLineDelimitor, startIndex + 1);
-
- if (endIndex == -1) {
- endIndex = metricVal.length();
- }
- return metricVal.substring(startIndex + 1, endIndex);
- }
- return null;
- }
-
- public static void removeMetrics(String keyPath) {
- removeMetrics(new File(keyPath));
- }
-
- public static void removeMetrics(File keyPathFile) {
- if (keyPathFile.isDirectory()) {
- String[] children = keyPathFile.list();
- for (int i = 0; i < children.length; i++) {
- removeMetrics(new File(keyPathFile, children[i]));
- }
- }
- try {
- keyPathFile.delete();
- } catch (Exception ex) {
- StatsCollector.instance().logException(StatExceptionCode.METRICS_REMOVE_ERROR);
- LOG.debug(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error in deleting file: {} for keyPath:{} with ExceptionCode: {}",
- ex.toString(), keyPathFile.getAbsolutePath(), StatExceptionCode.METRICS_REMOVE_ERROR.toString()),
- ex);
- }
- }
-
- public static String getJsonCurrentMilliSeconds() {
- return new StringBuilder().append("{\"")
- .append(PerformanceAnalyzerMetrics.METRIC_CURRENT_TIME).append("\"")
- .append(PerformanceAnalyzerMetrics.sKeyValueDelimitor)
- .append(System.currentTimeMillis()).append("}").toString();
- }
-}
-
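
For reference, a hypothetical standalone sketch of the record format the class above writes and parses: newline-separated "key:value" entries, built the way addMetricEntry() does and read back the way extractMetricValue() does.

public class MetricRecordFormatSketch {
    private static final String KV_DELIMITER = ":";
    private static final String LINE_DELIMITER = System.lineSeparator();

    // Same shape as PerformanceAnalyzerMetrics.addMetricEntry().
    static void addEntry(StringBuilder record, String key, long value) {
        record.append(LINE_DELIMITER).append(key).append(KV_DELIMITER).append(value);
    }

    // Same shape as PerformanceAnalyzerMetrics.extractMetricValue().
    static String extract(String record, String key) {
        int start = record.indexOf(key);
        if (start == -1) {
            return null;
        }
        start = record.indexOf(KV_DELIMITER, start);
        int end = record.indexOf(LINE_DELIMITER, start + 1);
        return record.substring(start + 1, end == -1 ? record.length() : end);
    }

    public static void main(String[] args) {
        StringBuilder record = new StringBuilder("current_time:1560000000000");
        addEntry(record, "Heap_Used", 123456789L);
        System.out.println(extract(record.toString(), "Heap_Used")); // 123456789
    }
}
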
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/ThreadIDUtil.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/ThreadIDUtil.java
deleted file mode 100644
index 63c5cfa6..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics/ThreadIDUtil.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.jvm.ThreadList;
-
-public final class ThreadIDUtil {
- private ThreadIDUtil() { }
-
- public static final ThreadIDUtil INSTANCE = new ThreadIDUtil();
-
- public long getNativeCurrentThreadId() {
-
- return getNativeThreadId(Thread.currentThread().getId());
- }
-
- public long getNativeThreadId(long jTid) {
- ThreadList.ThreadState threadState1 = ThreadList.getThreadState(jTid);
-
- long nid = -1;
- if (threadState1 != null) {
- nid = threadState1.nativeTid;
- }
-
- return nid;
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/CPUPagingActivityGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/CPUPagingActivityGenerator.java
deleted file mode 100644
index 768d9daa..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/CPUPagingActivityGenerator.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator;
-
-public interface CPUPagingActivityGenerator {
-
- // This method will be called before all following get methods
- // to make sure that all information exists for a thread id
- boolean hasPagingActivity(String threadId);
-
- double getCPUUtilization(String threadId);
- double getMajorFault(String threadId);
- double getMinorFault(String threadId);
- double getResidentSetSize(String threadId);
- void addSample();
-}
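
A hypothetical standalone sketch of the calling convention documented in the interface above: check hasPagingActivity(threadId) before reading any per-thread value (the trimmed interface and anonymous implementation below are illustrative only).

import java.util.Map;

public class PagingActivityConsumerSketch {
    // Trimmed-down stand-in for CPUPagingActivityGenerator.
    interface PagingActivity {
        boolean hasPagingActivity(String threadId);
        double getCPUUtilization(String threadId);
    }

    // Guard first, as the interface requires; otherwise a missing thread id
    // would blow up inside the generator.
    static double cpuOrZero(PagingActivity generator, String threadId) {
        return generator.hasPagingActivity(threadId)
                ? generator.getCPUUtilization(threadId) : 0.0;
    }

    public static void main(String[] args) {
        Map<String, Double> cpuByThread = Map.of("1234", 0.42);
        PagingActivity generator = new PagingActivity() {
            @Override
            public boolean hasPagingActivity(String threadId) {
                return cpuByThread.containsKey(threadId);
            }

            @Override
            public double getCPUUtilization(String threadId) {
                return cpuByThread.get(threadId);
            }
        };
        System.out.println(cpuOrZero(generator, "1234")); // 0.42
        System.out.println(cpuOrZero(generator, "9999")); // 0.0
    }
}
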
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/DiskIOMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/DiskIOMetricsGenerator.java
deleted file mode 100644
index 3671ad1c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/DiskIOMetricsGenerator.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator;
-
-public interface DiskIOMetricsGenerator {
-
- // This method will be called before all following get methods
- // to make sure that all information exists for a thread id
- boolean hasDiskIOMetrics(String threadId);
-
- // these metrics include page cache activity and count
- // only explicit syscalls: NO mmaps (major faults account for mmaps)
- double getAvgReadThroughputBps(String threadId);
- double getAvgWriteThroughputBps(String threadId);
- double getAvgTotalThroughputBps(String threadId);
- double getAvgReadSyscallRate(String threadId);
- double getAvgWriteSyscallRate(String threadId);
- double getAvgTotalSyscallRate(String threadId);
- void addSample();
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/DiskMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/DiskMetricsGenerator.java
deleted file mode 100644
index 71ead045..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/DiskMetricsGenerator.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator;
-
-import java.util.Set;
-
-public interface DiskMetricsGenerator {
- Set<String> getAllDisks();
- double getDiskUtilization(String disk);
- double getAwait(String disk);
- double getServiceRate(String disk);
- void addSample();
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/IPMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/IPMetricsGenerator.java
deleted file mode 100644
index 3c64f928..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/IPMetricsGenerator.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator;
-
-public interface IPMetricsGenerator {
- double getInPacketRate4();
- double getOutPacketRate4();
- double getInDropRate4();
- double getOutDropRate4();
- double getInPacketRate6();
- double getOutPacketRate6();
- double getInDropRate6();
- double getOutDropRate6();
- double getInBps();
- double getOutBps();
- void addSample();
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/OSMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/OSMetricsGenerator.java
deleted file mode 100644
index db55423f..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/OSMetricsGenerator.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator;
-
-import java.util.Set;
-
-public interface OSMetricsGenerator {
-
- String getPid();
- CPUPagingActivityGenerator getPagingActivityGenerator();
- SchedMetricsGenerator getSchedMetricsGenerator();
- Set<String> getAllThreadIds();
- DiskIOMetricsGenerator getDiskIOMetricsGenerator();
- TCPMetricsGenerator getTCPMetricsGenerator();
- IPMetricsGenerator getIPMetricsGenerator();
- DiskMetricsGenerator getDiskMetricsGenerator();
-
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/SchedMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/SchedMetricsGenerator.java
deleted file mode 100644
index 2d954333..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/SchedMetricsGenerator.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator;
-
-public interface SchedMetricsGenerator {
-
- // This method will be called before all following get methods
- // to make sure that all information exists for a thread id
- boolean hasSchedMetrics(String threadId);
-
- double getAvgRuntime(String threadId);
- double getAvgWaittime(String threadId);
- double getContextSwitchRate(String threadId);
- void addSample();
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/TCPMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/TCPMetricsGenerator.java
deleted file mode 100644
index 5c6e0c1b..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/TCPMetricsGenerator.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator;
-
-import java.util.Set;
-
-public interface TCPMetricsGenerator {
-
- Set<String> getAllDestionationIps();
- int getNumberOfFlows(String ip);
- double getTransmitQueueSize(String ip);
- double getReceiveQueueSize(String ip);
- double getCurrentLost(String ip);
- double getSendCongestionWindow(String ip);
- double getSlowStartThreshold(String ip);
- void addSample();
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxCPUPagingActivityGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxCPUPagingActivityGenerator.java
deleted file mode 100644
index 8b6659b7..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxCPUPagingActivityGenerator.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux;
-
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.CPUPagingActivityGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadCPU;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-public class LinuxCPUPagingActivityGenerator implements CPUPagingActivityGenerator {
-
- private Map<String, Double> cpu;
- private Map<String, Double[]> pagingActivities;
- public LinuxCPUPagingActivityGenerator() {
- cpu = new HashMap<>();
- pagingActivities = new HashMap<>();
- }
-
- @Override
- public double getCPUUtilization(final String threadId) {
-
- return cpu.getOrDefault(threadId, 0.0);
- }
-
- @Override
- public double getMajorFault(final String threadId) {
-
- return pagingActivities.get(threadId)[0];
- }
-
- @Override
- public double getMinorFault(final String threadId) {
-
- return pagingActivities.get(threadId)[1];
- }
-
- @Override
- public double getResidentSetSize(final String threadId) {
-
- return pagingActivities.get(threadId)[2];
- }
-
- @Override
- public boolean hasPagingActivity(final String threadId) {
-
- return pagingActivities.containsKey(threadId);
- }
-
- @Override
- public void addSample() {
-
- cpu.clear();
- pagingActivities.clear();
- ThreadCPU.INSTANCE.addSample();
- }
-
- public void setCPUUtilization(final String threadId, final Double cpuUtilization) {
-
- cpu.put(threadId, cpuUtilization);
- }
-
- public Set<String> getAllThreadIds() {
-
- return cpu.keySet();
- }
-
- public void setPagingActivities(final String threadId, final Double[] activityes) {
- pagingActivities.put(threadId, activityes);
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxDiskIOMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxDiskIOMetricsGenerator.java
deleted file mode 100644
index 77c818ba..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxDiskIOMetricsGenerator.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.DiskIOMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadDiskIO;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class LinuxDiskIOMetricsGenerator implements DiskIOMetricsGenerator {
-
-
- private Map<String, ThreadDiskIO.IOMetrics> diskIOMetricsMap;
- public LinuxDiskIOMetricsGenerator() {
- diskIOMetricsMap = new HashMap<>();
- }
-
- @Override
- public double getAvgReadThroughputBps(final String threadId) {
-
- return diskIOMetricsMap.get(threadId).avgReadThroughputBps;
- }
-
- @Override
- public double getAvgReadSyscallRate(final String threadId) {
-
- return diskIOMetricsMap.get(threadId).avgReadSyscallRate;
- }
-
- @Override
- public double getAvgWriteThroughputBps(final String threadId) {
-
- return diskIOMetricsMap.get(threadId).avgWriteThroughputBps;
- }
-
- @Override
- public double getAvgWriteSyscallRate(final String threadId) {
-
- return diskIOMetricsMap.get(threadId).avgWriteSyscallRate;
- }
-
- @Override
- public double getAvgTotalThroughputBps(final String threadId) {
-
- return diskIOMetricsMap.get(threadId).avgTotalThroughputBps;
- }
-
- @Override
- public double getAvgTotalSyscallRate(final String threadId) {
-
- return diskIOMetricsMap.get(threadId).avgTotalSyscallRate;
- }
-
- @Override
- public boolean hasDiskIOMetrics(final String threadId) {
-
- return diskIOMetricsMap.containsKey(threadId);
- }
-
- @Override
- public void addSample() {
- ThreadDiskIO.addSample();
- }
-
-
- public void setDiskIOMetrics(final String threadId, final ThreadDiskIO.IOMetrics ioMetrics) {
- diskIOMetricsMap.put(threadId, ioMetrics);
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxDiskMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxDiskMetricsGenerator.java
deleted file mode 100644
index 8ac82e7c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxDiskMetricsGenerator.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.DiskMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet.Disks;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.DiskMetricsGenerator;
-
-import java.util.Map;
-import java.util.Set;
-
-public class LinuxDiskMetricsGenerator implements DiskMetricsGenerator {
-
- private Map<String, DiskMetrics> diskMetricsMap;
-
- @Override
- public Set<String> getAllDisks() {
- return diskMetricsMap.keySet();
- }
-
- @Override
- public double getDiskUtilization(final String disk) {
-
- return diskMetricsMap.get(disk).utilization;
- }
-
- @Override
- public double getAwait(final String disk) {
-
- return diskMetricsMap.get(disk).await;
- }
-
- @Override
- public double getServiceRate(final String disk) {
-
- return diskMetricsMap.get(disk).serviceRate;
- }
-
- @Override
- public void addSample() {
- Disks.addSample();
- }
-
- public void setDiskMetricsMap(final Map<String, DiskMetrics> map) {
-
- diskMetricsMap = map;
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxIPMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxIPMetricsGenerator.java
deleted file mode 100644
index 52042d5f..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxIPMetricsGenerator.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.NetInterfaceSummary;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet.NetworkInterface;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.IPMetricsGenerator;
-
-public class LinuxIPMetricsGenerator implements IPMetricsGenerator {
-
-
- private NetInterfaceSummary inNetInterfaceSummary;
- private NetInterfaceSummary outNetInterfaceSummary;
-
- @Override
- public double getInPacketRate4() {
-
- return inNetInterfaceSummary.getPacketRate4();
- }
-
- @Override
- public double getOutPacketRate4() {
-
- return outNetInterfaceSummary.getPacketRate4();
- }
-
- @Override
- public double getInDropRate4() {
-
- return inNetInterfaceSummary.getDropRate4();
- }
-
- @Override
- public double getOutDropRate4() {
-
- return outNetInterfaceSummary.getDropRate4();
- }
-
- @Override
- public double getInPacketRate6() {
-
- return inNetInterfaceSummary.getPacketRate6();
- }
-
- @Override
- public double getOutPacketRate6() {
-
- return outNetInterfaceSummary.getPacketRate6();
- }
-
- @Override
- public double getInDropRate6() {
-
- return inNetInterfaceSummary.getDropRate6();
- }
-
- @Override
- public double getOutDropRate6() {
-
- return outNetInterfaceSummary.getDropRate6();
- }
-
- @Override
- public double getInBps() {
-
- return inNetInterfaceSummary.getBps();
- }
-
- @Override
- public double getOutBps() {
-
- return outNetInterfaceSummary.getBps();
- }
-
- @Override
- public void addSample() {
-
- NetworkInterface.addSample();
- }
-
- public void setInNetworkInterfaceSummary(final NetInterfaceSummary netInterfaceSummary) {
-
- this.inNetInterfaceSummary = netInterfaceSummary;
- }
-
- public void setOutNetworkInterfaceSummary(final NetInterfaceSummary netInterfaceSummary) {
-
- this.outNetInterfaceSummary = netInterfaceSummary;
- }
-
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxOSMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxOSMetricsGenerator.java
deleted file mode 100644
index 13938e37..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxOSMetricsGenerator.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet.Disks;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet.NetworkE2E;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet.NetworkInterface;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.DiskIOMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.DiskMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.IPMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.OSMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.CPUPagingActivityGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.SchedMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.TCPMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.OSGlobals;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadCPU;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadDiskIO;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadSched;
-
-import java.util.Set;
-
-public class LinuxOSMetricsGenerator implements OSMetricsGenerator {
-
- private static OSMetricsGenerator osMetricsGenerator;
- static {
- osMetricsGenerator = new LinuxOSMetricsGenerator();
- }
-
- public static OSMetricsGenerator getInstance() {
-
- return osMetricsGenerator;
- }
-
- @Override
- public String getPid() {
-
- return OSGlobals.getPid();
- }
-
- @Override
- public CPUPagingActivityGenerator getPagingActivityGenerator() {
-
- return ThreadCPU.INSTANCE.getCPUPagingActivity();
- }
-
- @Override
- public Set<String> getAllThreadIds() {
- return ThreadCPU.INSTANCE.getCPUPagingActivity().getAllThreadIds();
- }
-
- @Override
- public DiskIOMetricsGenerator getDiskIOMetricsGenerator() {
-
- return ThreadDiskIO.getIOUtilization();
- }
-
- @Override
- public SchedMetricsGenerator getSchedMetricsGenerator() {
-
- return ThreadSched.INSTANCE.getSchedLatency();
- }
-
- @Override
- public TCPMetricsGenerator getTCPMetricsGenerator() {
-
- return NetworkE2E.getTCPMetricsHandler();
- }
-
- @Override
- public IPMetricsGenerator getIPMetricsGenerator() {
-
- return NetworkInterface.getLinuxIPMetricsGenerator();
- }
-
- @Override
- public DiskMetricsGenerator getDiskMetricsGenerator() {
-
- return Disks.getDiskMetricsHandler();
- }
-
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxSchedMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxSchedMetricsGenerator.java
deleted file mode 100644
index 5983e950..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxSchedMetricsGenerator.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.SchedMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.os.ThreadSched;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class LinuxSchedMetricsGenerator implements SchedMetricsGenerator {
-
- private final Map<String, ThreadSched.SchedMetrics> schedMetricsMap;
-
-
- public LinuxSchedMetricsGenerator() {
- schedMetricsMap = new HashMap<>();
- }
-
- @Override
- public double getAvgRuntime(final String threadId) {
-
- return schedMetricsMap.get(threadId).avgRuntime;
- }
-
- @Override
- public double getAvgWaittime(final String threadId) {
-
- return schedMetricsMap.get(threadId).avgWaittime;
- }
-
- @Override
- public double getContextSwitchRate(final String threadId) {
-
- return schedMetricsMap.get(threadId).contextSwitchRate;
- }
-
- @Override
- public boolean hasSchedMetrics(final String threadId) {
-
- return schedMetricsMap.containsKey(threadId);
- }
-
- @Override
- public void addSample() {
-
- schedMetricsMap.clear();
- ThreadSched.INSTANCE.addSample();
- }
-
- public void setSchedMetric(final String threadId, final ThreadSched.SchedMetrics schedMetrics) {
-
- schedMetricsMap.put(threadId, schedMetrics);
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxTCPMetricsGenerator.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxTCPMetricsGenerator.java
deleted file mode 100644
index 7c601c16..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metrics_generator/linux/LinuxTCPMetricsGenerator.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.hwnet.NetworkE2E;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.TCPMetricsGenerator;
-
-import java.util.Map;
-import java.util.Set;
-
-public class LinuxTCPMetricsGenerator implements TCPMetricsGenerator {
-
- private Map<String, double[]> map;
-
- @Override
- public Set<String> getAllDestionationIps() {
- return map.keySet();
- }
-
- @Override
- public int getNumberOfFlows(final String ip) {
- return (int)map.get(ip)[0];
- }
-
- @Override
- public double getTransmitQueueSize(String ip) {
- return map.get(ip)[1];
- }
-
- @Override
- public double getReceiveQueueSize(String ip) {
- return map.get(ip)[2];
- }
-
- @Override
- public double getCurrentLost(String ip) {
- return map.get(ip)[3];
- }
-
- @Override
- public double getSendCongestionWindow(String ip) {
- return map.get(ip)[4];
- }
-
- @Override
- public double getSlowStartThreshold(String ip) {
- return map.get(ip)[5];
- }
-
- @Override
- public void addSample() {
- NetworkE2E.addSample();
- }
-
- public void setTCPMetrics(final Map<String, double[]> metrics) {
- map = metrics;
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/Dimensions.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/Dimensions.java
deleted file mode 100644
index a7dd382d..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/Dimensions.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metricsdb;
-
-import java.util.Set;
-import java.util.Map;
-import java.util.HashMap;
-
-import org.jooq.impl.DSL;
-import org.jooq.Field;
-
-public class Dimensions {
- //A dimension is a key-value pair.
- private Map<String, String> dimensions;
- public Dimensions() {
- this.dimensions = new HashMap<>();
- }
-
- public void put(String key, String value) {
- this.dimensions.put(key, value);
- }
-
- public String get(String key) {
- return this.dimensions.get(key);
- }
-
- public Map<Field<String>, String> getFieldMap() {
- Map<Field<String>, String> fieldMap = new HashMap<Field<String>, String>();
- for (Map.Entry<String, String> entry: dimensions.entrySet()) {
- fieldMap.put(DSL.field(DSL.name(entry.getKey()), String.class), entry.getValue());
- }
- return fieldMap;
- }
-
- public Set<String> getDimensionNames() {
- return this.dimensions.keySet();
- }
-}
-
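
For orientation, a minimal usage sketch of the deleted Dimensions class (illustrative only, not part of this change set; the dimension names and the wrapper class are hypothetical):

import java.util.Map;
import org.jooq.Field;

// Illustrative sketch: Dimensions turns plain key/value pairs into jOOQ fields.
public class DimensionsUsageSketch {
    public static Map<Field<String>, String> shardDimensions() {
        Dimensions dims = new Dimensions();
        dims.put("ShardID", "1");            // hypothetical dimension names/values
        dims.put("IndexName", "sonested");
        // Each key becomes a Field<String>, so the map can be handed directly to a
        // jOOQ insert via set(Map<Field<String>, String>), as MetricsDB.putMetric() does.
        return dims.getFieldMap();
    }
}
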
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/Metric.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/Metric.java
deleted file mode 100644
index 62f2abd1..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/Metric.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metricsdb;
-
-public class Metric<T> {
- private String name;
- private T sum;
- private T avg;
- private T min;
- private T max;
-
- public Metric(String name, T value) {
- this.name = name;
- this.sum = value;
- this.avg = value;
- this.min = value;
- this.max = value;
- }
-
- public Metric(String name, T sum, T avg, T min, T max) {
- this.name = name;
- this.sum = sum;
- this.avg = avg;
- this.min = min;
- this.max = max;
- }
-
- public String getName() {
- return this.name;
- }
-
- public T getSum() {
- return this.sum;
- }
-
- public T getAvg() {
- return this.avg;
- }
-
- public T getMin() {
- return this.min;
- }
-
- public T getMax() {
- return this.max;
- }
-
- public Class<?> getValueType() {
- return this.sum.getClass();
- }
-
- //Unit test helper methods
- public static Metric<Double> cpu(Double val) {
- return new Metric<Double>("cpu", val);
- }
-
- //Unit test helper methods
- public static Metric<Double> rss(Double val) {
- return new Metric<Double>("rss", val);
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/MetricsDB.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/MetricsDB.java
deleted file mode 100644
index c7781493..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/metricsdb/MetricsDB.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.metricsdb;
-
-import java.io.File;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.HashSet;
-import java.util.Arrays;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.BatchBindStep;
-import org.jooq.DSLContext;
-import org.jooq.Field;
-import org.jooq.Record;
-import org.jooq.Result;
-import org.jooq.SQLDialect;
-import org.jooq.Select;
-import org.jooq.TableLike;
-import org.jooq.impl.DSL;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.DBUtils;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.config.PluginSettings;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.reader.Removable;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-
-/**
- * On-disk database that holds a 5-second snapshot of all metrics.
- * We create one table per metric. Every row contains four aggregations and any other relevant dimensions.
- *
- * Eg:
- * CPU table
- * |sum|avg|max|min|   index|shard|role|
- * +---+---+---+---+--------+-----+----+
- * |  5|2.5|  3|  2|sonested|    1| N/A|
- *
- * RSS table
- * |sum|avg|max|min|    index|shard|role|
- * +---+---+---+---+---------+-----+----+
- * | 30| 15| 20| 10|nyc_taxis|    1| N/A|
- */
-@SuppressWarnings("serial")
-public class MetricsDB implements Removable {
-
- private static final Logger LOG = LogManager.getLogger(MetricsDB.class);
-
- private static final String DB_FILE_PREFIX_PATH_DEFAULT = "/tmp/metricsdb_";
- private static final String DB_FILE_PREFIX_PATH_CONF_NAME = "metrics-db-file-prefix-path";
- private static final String DB_URL = "jdbc:sqlite:";
- private final Connection conn;
- private final DSLContext create;
- public static final String SUM = "sum";
- public static final String AVG = "avg";
- public static final String MIN = "min";
- public static final String MAX = "max";
- public static final HashSet<String> AGG_VALUES = new HashSet<>(Arrays.asList(SUM, AVG, MIN, MAX));
- private long windowStartTime;
-
- public String getDBFilePath() {
- return PluginSettings.instance().getSettingValue(DB_FILE_PREFIX_PATH_CONF_NAME, DB_FILE_PREFIX_PATH_DEFAULT)
- + Long.toString(windowStartTime);
- }
-
- public MetricsDB(long windowStartTime) throws Exception {
- this.windowStartTime = windowStartTime;
- String url = DB_URL + getDBFilePath();
- conn = DriverManager.getConnection(url);
- conn.setAutoCommit(false);
- create = DSL.using(conn, SQLDialect.SQLITE);
- }
-
- public void close() throws Exception {
- conn.close();
- }
-
- public void createMetric(Metric<?> metric, List<String> dimensions) {
- if (DBUtils.checkIfTableExists(create, metric.getName())) {
- return;
- }
-
- List<Field<?>> fields = DBUtils.getFieldsFromList(dimensions);
- fields.add(DSL.field(SUM, metric.getValueType()));
- fields.add(DSL.field(AVG, metric.getValueType()));
- fields.add(DSL.field(MIN, metric.getValueType()));
- fields.add(DSL.field(MAX, metric.getValueType()));
- create.createTable(metric.getName())
- .columns(fields)
- .execute();
- }
-
- public BatchBindStep startBatchPut(Metric<?> metric, List<String> dimensions) {
- List<Object> dummyValues = new ArrayList<>();
- for (String dim: dimensions) {
- dummyValues.add(null);
- }
- //Finally add sum, avg, min, max
- dummyValues.add(null);
- dummyValues.add(null);
- dummyValues.add(null);
- dummyValues.add(null);
- return create.batch(create.insertInto(DSL.table(metric.getName())).values(dummyValues));
- }
-
- public BatchBindStep startBatchPut(String tableName, int dimNum) {
- if (dimNum < 1 || !DBUtils.checkIfTableExists(create, tableName)) {
- throw new IllegalArgumentException(String
- .format("Incorrect arguments %s, %d", tableName, dimNum));
- }
- List<Object> dummyValues = new ArrayList<>(dimNum);
- for (int i = 0; i < dimNum; i++) {
- dummyValues.add(null);
- }
-
- return create.batch(
- create.insertInto(DSL.table(tableName)).values(dummyValues));
- }
-
- public void putMetric(Metric<Double> metric,
- Dimensions dimensions,
- long windowStartTime) {
- create.insertInto(DSL.table(metric.getName()))
- .set(DSL.field(SUM, Double.class), metric.getSum())
- .set(DSL.field(AVG, Double.class), metric.getAvg())
- .set(DSL.field(MIN, Double.class), metric.getMin())
- .set(DSL.field(MAX, Double.class), metric.getMax())
- .set(dimensions.getFieldMap())
- .execute();
- }
-
- //We have a table per metric. We do a group by/aggregate on
- //every dimension and return all the metric tables.
- public List<TableLike<Record>> getAggregatedMetricTables(List<String> metrics,
- List<String> aggregations, List<String> dimensions) throws Exception {
- List<TableLike<Record>> tList = new ArrayList<>();
- List<Field<?>> groupByFields = DBUtils.getFieldsFromList(dimensions);
-
- for (int i = 0; i < metrics.size(); i++) {
- String metric = metrics.get(i);
- List<Field<?>> selectFields = DBUtils.getFieldsFromList(dimensions);
- String aggType = aggregations.get(i);
- if (aggType.equals(SUM)) {
- Field<Double> field = DSL.field(SUM, Double.class);
- selectFields.add(DSL.sum(field).as(metric));
- } else if (aggType.equals(AVG)) {
- Field<Double> field = DSL.field(AVG, Double.class);
- selectFields.add(DSL.avg(field).as(metric));
- } else if (aggType.equals(MIN)) {
- Field<Double> field = DSL.field(MIN, Double.class);
- selectFields.add(DSL.min(field).as(metric));
- } else if (aggType.equals(MAX)) {
- Field<Double> field = DSL.field(MAX, Double.class);
- selectFields.add(DSL.max(field).as(metric));
- } else {
- throw new Exception("Unknown agg type");
- }
- if (!DBUtils.checkIfTableExists(create, metrics.get(i))) {
- tList.add(null);
- } else {
- tList.add(create.select(selectFields)
- .from(DSL.table(metric))
- .groupBy(groupByFields)
- .asTable());
- }
- }
- return tList;
- }
-
-
- /**
- * query metrics from different tables and merge to one table.
- *
- * getAggregatedMetricTables returns tables like:
- * +-----+---------+-----+
- * |shard|indexName| cpu|
- * +-----+---------+-----+
- * |0 |sonested | 10|
- * |1 |sonested | 20|
- *
- * +-----+---------+-----+
- * |shard|indexName| rss|
- * +-----+---------+-----+
- * |0 |sonested | 54|
- * |2 |sonested | 47|
- *
- * We select metrics from each table and union them:
- * +-----+---------+-----+-----+
- * |shard|indexName| cpu| rss|
- * +-----+---------+-----+-----+
- * |0 |sonested | 10| null|
- * |1 |sonested | 20| null|
- * |0 |sonested | null| 54|
- * |2 |sonested | null| 47|
- *
- * Then, we group by dimensions and return following table:
- * +-----+---------+-----+-----+
- * |shard|indexName| cpu| rss|
- * +-----+---------+-----+-----+
- * |0 |sonested | 10| 54|
- * |1 |sonested | 20| null|
- * |2 |sonested | null| 47|
- *
- * @param metrics a list of metrics we want to query
- * @param aggregations aggregation we want to use for each metric
- * @param dimensions dimensions we want to group the metrics by
- *
- * @return result of query
- *
- * @throws Exception if one of the aggregations is something other than
- * "sum", "avg", "min", or "max".
- * */
- public Result<Record> queryMetric(List<String> metrics,
- List<String> aggregations, List<String> dimensions) throws Exception {
- List<TableLike<Record>> tList = getAggregatedMetricTables(metrics,
- aggregations, dimensions);
-
- //Join all the individual metric tables to generate the final table.
- Select<Record> finalTable = null;
- for (int i = 0; i < tList.size(); i++) {
- TableLike<Record> metricTable = tList.get(i);
- if (metricTable == null) {
- LOG.info(String.format("%s metric table does not exist. " +
- "Returning null for the metric/dimension.", metrics.get(i)));
- continue;
- }
- List<Field<?>> selectFields = DBUtils.getSelectFieldsForMetricName(metrics.get(i), metrics, dimensions);
- Select<Record> curTable = create.select(selectFields).from(metricTable);
-
- if (finalTable == null) {
- finalTable = curTable;
- } else {
- finalTable = finalTable.union(curTable);
- }
- }
-
- List<Field<?>> allFields = DBUtils.getFieldsFromList(dimensions);
- for (String metric : metrics) {
- allFields.add(DSL.max(DSL.field(metric, Double.class)).as(metric));
- }
- List<Field<?>> groupByFields = DBUtils.getFieldsFromList(dimensions);
- if (finalTable == null) {
- return null;
- }
- return create.select(allFields).from(finalTable).groupBy(groupByFields).fetch();
- }
-
- public void commit() throws Exception {
- conn.commit();
- }
-
- @Override
- public void remove() throws Exception {
- conn.close();
- }
-
- public void deleteOnDiskFile() {
- File dbFile = new File(getDBFilePath());
- if (!dbFile.delete()) {
- LOG.error("Failed to delete File - {} with ExceptionCode: {}",
- getDBFilePath(), StatExceptionCode.OTHER.toString());
- StatsCollector.instance().logException();
- }
- }
-
- public Result<Record> queryMetric(String metric) {
- return create.select().from(DSL.table(metric)).fetch();
- }
-
- public boolean metricExists(String metric) {
- return DBUtils.checkIfTableExists(create, metric);
- }
-}
-
-
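
The two Javadoc blocks above (the table-per-metric layout and the union/group-by merge in queryMetric) can be tied together with a short end-to-end sketch. It is illustrative only and not part of this change set: the dimension names, the metric value, and the wrapper class are hypothetical, and it assumes the SQLite JDBC driver and default PluginSettings are available.

import java.util.Arrays;
import org.jooq.Record;
import org.jooq.Result;

// Illustrative sketch of the MetricsDB write/read path.
public class MetricsDBUsageSketch {
    public static void main(String[] args) throws Exception {
        MetricsDB db = new MetricsDB(System.currentTimeMillis());

        Dimensions dims = new Dimensions();
        dims.put("ShardID", "1");            // hypothetical dimension values
        dims.put("IndexName", "sonested");

        // One table per metric: createMetric() builds the "cpu" table with the dimension
        // columns plus sum/avg/min/max; putMetric() inserts one row of aggregates.
        db.createMetric(Metric.cpu(2.5), Arrays.asList("ShardID", "IndexName"));
        db.putMetric(Metric.cpu(2.5), dims, 0L);
        db.commit();

        // queryMetric() unions the per-metric tables and groups by the dimensions,
        // yielding one row per (ShardID, IndexName) with a "cpu" column.
        Result<Record> rows = db.queryMetric(
                Arrays.asList("cpu"),
                Arrays.asList("avg"),
                Arrays.asList("ShardID", "IndexName"));
        System.out.println(rows);

        db.remove();            // closes the SQLite connection
        db.deleteOnDiskFile();  // removes the on-disk file (by default /tmp/metricsdb_<start time>)
    }
}
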
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/model/MetricAttributes.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/model/MetricAttributes.java
deleted file mode 100644
index e3fe67aa..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/model/MetricAttributes.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.model;
-
-import java.util.HashSet;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricDimension;
-
-
-public class MetricAttributes {
- public String unit;
- public HashSet<String> dimensionNames;
-
- MetricAttributes(String unit,
- MetricDimension[] dimensions) {
-
- this.unit = unit;
- this.dimensionNames = new HashSet<String>();
- for (MetricDimension dimension : dimensions) {
- this.dimensionNames.add(dimension.toString());
- }
- }
-
-}
\ No newline at end of file
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/model/MetricsModel.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/model/MetricsModel.java
deleted file mode 100644
index e0d1bd13..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/model/MetricsModel.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.model;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.AggregatedOSDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.CircuitBreakerDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.CircuitBreakerValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.CommonMetric;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.DiskDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.DiskValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.EmptyDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HeapDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HeapValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HttpOnlyDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HttpMetric;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.IPDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.IPValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.LatencyDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.MasterPendingValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.MetricUnits;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.OSMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.ShardBulkMetric;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.ShardOperationMetric;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.ShardStatsValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.ShardStatsDerivedDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.TCPDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.TCPValue;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.ThreadPoolDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.ThreadPoolValue;
-
-public class MetricsModel {
-
- public static final Map<String, MetricAttributes> ALL_METRICS = new HashMap<>();
-
- static {
- // OS Metrics
- ALL_METRICS.put(OSMetrics.CPU_UTILIZATION.toString(),
- new MetricAttributes(MetricUnits.CORES.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.PAGING_MAJ_FLT_RATE.toString(),
- new MetricAttributes(MetricUnits.COUNT_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.PAGING_MIN_FLT_RATE.toString(),
- new MetricAttributes(MetricUnits.COUNT_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.PAGING_RSS.toString(),
- new MetricAttributes(MetricUnits.PAGES.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.SCHED_RUNTIME.toString(),
- new MetricAttributes(MetricUnits.SEC_PER_CONTEXT_SWITCH.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.SCHED_WAITTIME.toString(),
- new MetricAttributes(MetricUnits.SEC_PER_CONTEXT_SWITCH.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.SCHED_CTX_RATE.toString(),
- new MetricAttributes(MetricUnits.COUNT_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.HEAP_ALLOC_RATE.toString(),
- new MetricAttributes(MetricUnits.BYTE_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.IO_READ_THROUGHPUT.toString(),
- new MetricAttributes(MetricUnits.BYTE_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.IO_WRITE_THROUGHPUT.toString(),
- new MetricAttributes(MetricUnits.BYTE_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.IO_TOT_THROUGHPUT.toString(),
- new MetricAttributes(MetricUnits.BYTE_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.IO_READ_SYSCALL_RATE.toString(),
- new MetricAttributes(MetricUnits.COUNT_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.IO_WRITE_SYSCALL_RATE.toString(),
- new MetricAttributes(MetricUnits.COUNT_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.IO_TOTAL_SYSCALL_RATE.toString(),
- new MetricAttributes(MetricUnits.COUNT_PER_SEC.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.THREAD_BLOCKED_TIME.toString(),
- new MetricAttributes(MetricUnits.SEC_PER_EVENT.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(OSMetrics.THREAD_BLOCKED_EVENT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), AggregatedOSDimension.values()));
-
- // Latency Metric
- ALL_METRICS.put(CommonMetric.LATENCY.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), LatencyDimension.values()));
-
- ALL_METRICS.put(ShardOperationMetric.SHARD_OP_COUNT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), AggregatedOSDimension.values()));
- ALL_METRICS.put(ShardBulkMetric.DOC_COUNT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), AggregatedOSDimension.values()));
-
- // HTTP Metrics
- ALL_METRICS.put(HttpMetric.HTTP_REQUEST_DOCS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), HttpOnlyDimension.values()));
- ALL_METRICS.put(HttpMetric.HTTP_TOTAL_REQUESTS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), HttpOnlyDimension.values()));
-
- // Circuit Breaker Metrics
- ALL_METRICS.put(CircuitBreakerValue.CB_ESTIMATED_SIZE.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), CircuitBreakerDimension.values()));
- ALL_METRICS.put(CircuitBreakerValue.CB_CONFIGURED_SIZE.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), CircuitBreakerDimension.values()));
- ALL_METRICS.put(CircuitBreakerValue.CB_TRIPPED_EVENTS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), CircuitBreakerDimension.values()));
-
- // Heap Metrics
- ALL_METRICS.put(HeapValue.GC_COLLECTION_EVENT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), HeapDimension.values()));
- ALL_METRICS.put(HeapValue.GC_COLLECTION_TIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), HeapDimension.values()));
- ALL_METRICS.put(HeapValue.HEAP_COMMITTED.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), HeapDimension.values()));
- ALL_METRICS.put(HeapValue.HEAP_INIT.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), HeapDimension.values()));
- ALL_METRICS.put(HeapValue.HEAP_MAX.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), HeapDimension.values()));
- ALL_METRICS.put(HeapValue.HEAP_USED.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), HeapDimension.values()));
-
- // Disk Metrics
- ALL_METRICS.put(DiskValue.DISK_UTILIZATION.toString(),
- new MetricAttributes(MetricUnits.PERCENT.toString(), DiskDimension.values()));
- ALL_METRICS.put(DiskValue.DISK_WAITTIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), DiskDimension.values()));
- ALL_METRICS.put(DiskValue.DISK_SERVICE_RATE.toString(),
- new MetricAttributes(MetricUnits.MEGABYTE_PER_SEC.toString(), DiskDimension.values()));
-
- // TCP Metrics
- ALL_METRICS.put(TCPValue.Net_TCP_NUM_FLOWS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), TCPDimension.values()));
- ALL_METRICS.put(TCPValue.Net_TCP_TXQ.toString(),
- new MetricAttributes(MetricUnits.SEGMENT_PER_FLOW.toString(), TCPDimension.values()));
- ALL_METRICS.put(TCPValue.Net_TCP_RXQ.toString(),
- new MetricAttributes(MetricUnits.SEGMENT_PER_FLOW.toString(), TCPDimension.values()));
- ALL_METRICS.put(TCPValue.Net_TCP_LOST.toString(),
- new MetricAttributes(MetricUnits.SEGMENT_PER_FLOW.toString(), TCPDimension.values()));
- ALL_METRICS.put(TCPValue.Net_TCP_SEND_CWND.toString(),
- new MetricAttributes(MetricUnits.BYTE_PER_FLOW.toString(), TCPDimension.values()));
- ALL_METRICS.put(TCPValue.Net_TCP_SSTHRESH.toString(),
- new MetricAttributes(MetricUnits.BYTE_PER_FLOW.toString(), TCPDimension.values()));
-
- // IP Metrics
- ALL_METRICS.put(IPValue.NET_PACKET_RATE4.toString(),
- new MetricAttributes(MetricUnits.PACKET_PER_SEC.toString(), IPDimension.values()));
- ALL_METRICS.put(IPValue.NET_PACKET_DROP_RATE4.toString(),
- new MetricAttributes(MetricUnits.PACKET_PER_SEC.toString(), IPDimension.values()));
- ALL_METRICS.put(IPValue.NET_PACKET_RATE6.toString(),
- new MetricAttributes(MetricUnits.PACKET_PER_SEC.toString(), IPDimension.values()));
- ALL_METRICS.put(IPValue.NET_PACKET_DROP_RATE6.toString(),
- new MetricAttributes(MetricUnits.PACKET_PER_SEC.toString(), IPDimension.values()));
- ALL_METRICS.put(IPValue.NET_THROUGHPUT.toString(),
- new MetricAttributes(MetricUnits.BYTE_PER_SEC.toString(), IPDimension.values()));
-
- // Thread Pool Metrics
- ALL_METRICS.put(ThreadPoolValue.THREADPOOL_QUEUE_SIZE.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ThreadPoolDimension.values()));
- ALL_METRICS.put(ThreadPoolValue.THREADPOOL_REJECTED_REQS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ThreadPoolDimension.values()));
- ALL_METRICS.put(ThreadPoolValue.THREADPOOL_TOTAL_THREADS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ThreadPoolDimension.values()));
- ALL_METRICS.put(ThreadPoolValue.THREADPOOL_ACTIVE_THREADS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ThreadPoolDimension.values()));
-
- // Shard Stats Metrics
- ALL_METRICS.put(ShardStatsValue.INDEXING_THROTTLE_TIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_QUERY_HIT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_QUERY_MISS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_QUERY_SIZE.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_FIELDDATA_EVICTION.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_FIELDDATA_SIZE.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_REQUEST_HIT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_REQUEST_MISS.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_REQUEST_EVICTION.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.CACHE_REQUEST_SIZE.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.REFRESH_EVENT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.REFRESH_TIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.FLUSH_EVENT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.FLUSH_TIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.MERGE_EVENT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.MERGE_TIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.MERGE_CURRENT_EVENT.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.INDEXING_BUFFER.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.SEGMENTS_TOTAL.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.SEGMENTS_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.TERMS_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.STORED_FIELDS_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.TERM_VECTOR_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.NORMS_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.POINTS_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.DOC_VALUES_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.INDEX_WRITER_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.VERSION_MAP_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
- ALL_METRICS.put(ShardStatsValue.BITSET_MEMORY.toString(),
- new MetricAttributes(MetricUnits.BYTE.toString(), ShardStatsDerivedDimension.values()));
-
- // Master Metrics
- ALL_METRICS.put(MasterPendingValue.MASTER_PENDING_QUEUE_SIZE.toString(),
- new MetricAttributes(MetricUnits.COUNT.toString(), EmptyDimension.values()));
-
- ALL_METRICS.put(AllMetrics.MasterMetricValues.MASTER_TASK_QUEUE_TIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), AllMetrics.MasterMetricDimensions.values()));
-
- ALL_METRICS.put(AllMetrics.MasterMetricValues.MASTER_TASK_RUN_TIME.toString(),
- new MetricAttributes(MetricUnits.MILLISECOND.toString(), AllMetrics.MasterMetricDimensions.values()));
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/OSGlobals.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/OSGlobals.java
deleted file mode 100644
index acbc1f2e..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/OSGlobals.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.os;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.ConfigStatus;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-
-public class OSGlobals {
- private static long scClkTck;
- private static String pid;
- private static final String CLK_TCK_SYS_PROPERTY_NAME = "clk.tck";
-
- private static final Logger LOGGER = LogManager.getLogger(OSGlobals.class);
- private static final long REFRESH_INTERVAL_MS = MetricsConfiguration.CONFIG_MAP.get(OSGlobals.class).samplingInterval;
- private static List<String> tids = new ArrayList<>();
- private static long lastUpdated = -1;
-
- static {
- try {
- pid = new File("/proc/self").getCanonicalFile().getName();
- getScClkTckFromConfig();
- enumTids();
- lastUpdated = System.currentTimeMillis();
- } catch (Exception e) {
- LOGGER.error(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error in static initialization of OSGlobals with exception: {}",
- e.toString()),
- e);
- }
- }
-
- public static String getPid() {
- return pid;
- }
-
- public static long getScClkTck() {
- return scClkTck;
- }
-
- private static void getScClkTckFromConfig() throws Exception {
- try {
- scClkTck = Long.parseUnsignedLong(System.getProperty(CLK_TCK_SYS_PROPERTY_NAME));
- } catch (Exception e) {
- LOGGER.error(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error in reading/parsing clk.tck value: {}",
- e.toString()),
- e);
- ConfigStatus.INSTANCE.setConfigurationInvalid();
- }
- }
-
- private static void enumTids() {
- tids.clear();
- tids.add(pid);
-
- File self = new File("/proc/self/task");
- File[] filesList = self.listFiles();
- for (File f : filesList) {
- if (f.isDirectory()) {
- String tid = f.getName();
- tids.add(tid);
- }
- }
- }
-
- static synchronized List<String> getTids() {
- long curtime = System.currentTimeMillis();
- if (curtime - lastUpdated > REFRESH_INTERVAL_MS) {
- enumTids();
- lastUpdated = curtime;
- }
- return new ArrayList<>(tids);
- }
-}
-
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/SchemaFileParser.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/SchemaFileParser.java
deleted file mode 100644
index 5ebcc21d..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/SchemaFileParser.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.os;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileNotFoundException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Arrays;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-
-public class SchemaFileParser {
- private static final Logger LOGGER = LogManager.getLogger(SchemaFileParser.class);
- public enum FieldTypes {
- INT,
- STRING,
- CHAR,
- ULONG,
- DOUBLE;
- }
-
- private String filename = null;
- private String[] keys = null;
- private FieldTypes[] types = null;
- private boolean preProcess = false;
-
- public SchemaFileParser (String file, String[] keys, FieldTypes[] types) {
- this.filename = file;
- this.keys = keys.clone();
- this.types = types.clone();
- }
-
- //- From Java 11 onwards, /proc/<pid>/task/<tid>/stat contains the thread name, which can have spaces in it
- //- and is wrapped in "()". A preprocessing step recombines those tokens into a single value.
- public SchemaFileParser (String file, String[] keys, FieldTypes[] types, boolean preProcess) {
- this.filename = file;
- this.keys = keys.clone();
- this.types = types.clone();
- this.preProcess = preProcess;
- }
-
- private Object getTypedValue(String value, FieldTypes type) {
- switch (type) {
- case CHAR:
- return value.charAt(0);
- case INT:
- return Integer.valueOf(value);
- case STRING:
- return value;
- case ULONG:
- return Long.parseUnsignedLong(value);
- case DOUBLE:
- return Double.valueOf(value);
- default:
- return null;
- }
- }
-
- private void generateMap(String content, Map<String, Object> map) {
- String[] splitvalues = content.trim().split(" +");
- String[] values = preProcess(splitvalues);
- if(values.length < types.length) {
- LOGGER.debug("Content Values tokens {} length is less than types {} length with ExceptionCode: {}",
- () -> Arrays.toString(values), () -> Arrays.toString(types), () -> StatExceptionCode.SCHEMA_PARSER_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.SCHEMA_PARSER_ERROR);
- }
- int lim = Math.min(values.length, types.length);
- for (int idx = 0; idx < lim; idx++) {
- map.put(keys[idx], getTypedValue(values[idx], types[idx]));
- }
- }
-
- private String[] preProcess(String[] tokens) {
- if(preProcess) {
- List<String> processedTokens = new ArrayList<>();
- StringBuffer tmp = new StringBuffer();
- boolean beingProcessed = false;
- for (int idx = 0; idx < tokens.length; idx++) {
- if(beingProcessed) {
- tmp.append(tokens[idx]);
- if(tokens[idx].endsWith(")")) {
- beingProcessed = false;
- processedTokens.add(tmp.toString());
- tmp.setLength(0);
- }
- } else if(tokens[idx].startsWith("(")) {
- if(tokens[idx].endsWith(")")) {
- processedTokens.add(tokens[idx]);
- } else {
- beingProcessed = true;
- tmp.append(tokens[idx]);
- }
- } else {
- processedTokens.add(tokens[idx]);
- }
- }
- return processedTokens.toArray(new String[processedTokens.size()]);
- } else {
- return tokens;
- }
- }
-
- /*
- to be used for parsing output that contains a single line
- */
- public Map<String, Object> parse() {
- Map<String, Object> map = new HashMap<>();
- try (FileReader fileReader = new FileReader(new File(filename));
- BufferedReader bufferedReader = new BufferedReader(fileReader);) {
- String line = bufferedReader.readLine();
- if (line == null) {
- return map;
- }
- generateMap(line, map);
- } catch (FileNotFoundException e) {
- LOGGER.debug("FileNotFound in parse with exception: {}", () -> e.toString());
- } catch (Exception e) {
- LOGGER.debug("Error in parse with exception: {} with ExceptionCode: {}",
- () -> e.toString(), () -> StatExceptionCode.SCHEMA_PARSER_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.SCHEMA_PARSER_ERROR);
- }
- return map;
- }
-
- /*
- to be used for parsing output that contains multiple lines
- */
- public List<Map<String, Object>> parseMultiple() {
- List<Map<String, Object>> mapList = new ArrayList<>();
- try (FileReader fileReader = new FileReader(new File(filename));
- BufferedReader bufferedReader = new BufferedReader(fileReader);) {
- String line;
- while ((line = bufferedReader.readLine()) != null) {
- Map<String, Object> map = new HashMap<>();
- generateMap(line, map);
- mapList.add(map);
- }
- } catch (FileNotFoundException e) {
- LOGGER.debug("FileNotFound in parse with exception: {}", () -> e.toString());
- } catch (Exception e) {
- LOGGER.debug("Error in parseMultiple with exception: {} with ExceptionCode: {}",
- () -> e.toString(), () -> StatExceptionCode.SCHEMA_PARSER_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.SCHEMA_PARSER_ERROR);
- }
- return mapList;
- }
-}
-
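Note: the token-merging preprocessing that the removed SchemaFileParser applied to Java 11+ stat lines is easiest to see on a concrete input. The standalone sketch below is illustrative only and not part of this change; the class name and sample line are made up. It reproduces how a parenthesized, space-containing thread name is collapsed back into one token so that values keep lining up with the keys[] and types[] arrays.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch: re-tokenizes a hypothetical /proc/<pid>/task/<tid>/stat line.
public class StatLineTokenizerDemo {
    public static void main(String[] args) {
        String line = "1234 (GC Thread#0) S 1 1234 1200";   // hypothetical stat line
        List<String> merged = new ArrayList<>();
        StringBuilder name = new StringBuilder();
        boolean inName = false;
        for (String tok : line.trim().split(" +")) {
            if (inName) {
                name.append(tok);
                if (tok.endsWith(")")) {          // closing parenthesis ends the thread name
                    merged.add(name.toString());
                    name.setLength(0);
                    inName = false;
                }
            } else if (tok.startsWith("(") && !tok.endsWith(")")) {
                inName = true;                    // start of a multi-token thread name
                name.append(tok);
            } else {
                merged.add(tok);
            }
        }
        System.out.println(merged);               // [1234, (GCThread#0), S, 1, 1234, 1200]
    }
}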
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadCPU.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadCPU.java
deleted file mode 100644
index fc169e34..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadCPU.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.os;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux.LinuxCPUPagingActivityGenerator;
-
-public final class ThreadCPU {
- private static final Logger LOGGER = LogManager.getLogger(ThreadCPU.class);
- public static final ThreadCPU INSTANCE = new ThreadCPU();
- private long scClkTck = 0;
- private String pid = null;
- private List<String> tids = null;
- private Map<String, Map<String, Object>> tidKVMap = new HashMap<>();
- private Map<String, Map<String, Object>> oldtidKVMap = new HashMap<>();
- private long kvTimestamp = 0;
- private long oldkvTimestamp = 0;
- private LinuxCPUPagingActivityGenerator cpuPagingActivityMap = new LinuxCPUPagingActivityGenerator();
-
- // these two arrays map 1-1
- private static String[] statKeys = {
- "pid",
- "comm",
- "state",
- "ppid",
- "pgrp",
- "session",
- "ttynr",
- "tpgid",
- "flags",
- "minflt",
- "cminflt",
- "majflt",
- "cmajflt",
- "utime",
- "stime",
- "cutime",
- "cstime",
- "prio",
- "nice",
- "nthreads",
- "itrealvalue",
- "starttime",
- "vsize",
- "rss",
- "rsslim",
- "startcode",
- "endcode",
- "startstack",
- "kstkesp",
- "kstkeip",
- "signal",
- "blocked",
- "sigignore",
- "sigcatch",
- "wchan",
- "nswap",
- "cnswap",
- "exitsig",
- "cpu",
- "rtprio",
- "schedpolicy",
- "bio_ticks",
- "vmtime",
- "cvmtime"
- // more that we ignore
- };
-
- private static SchemaFileParser.FieldTypes[] statTypes = {
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.STRING,
- SchemaFileParser.FieldTypes.CHAR,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.ULONG, //10
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG, //20
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG, //30
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT, //40
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT,
- SchemaFileParser.FieldTypes.INT
- };
-
- private ThreadCPU() {
- try {
- pid = OSGlobals.getPid();
- scClkTck = OSGlobals.getScClkTck();
- tids = OSGlobals.getTids();
- } catch (Exception e) {
- LOGGER.error(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error In Initializing ThreadCPU: {}",
- e.toString()),
- e);
- }
- }
-
- public synchronized void addSample() {
- tids = OSGlobals.getTids();
-
- oldtidKVMap.clear();
- oldtidKVMap.putAll(tidKVMap);
-
- tidKVMap.clear();
- oldkvTimestamp = kvTimestamp;
- kvTimestamp = System.currentTimeMillis();
- for (String tid : tids) {
- Map<String, Object> sample =
- //(new SchemaFileParser("/proc/"+tid+"/stat",
- (new SchemaFileParser("/proc/" + pid + "/task/" + tid + "/stat", statKeys, statTypes, true)).parse();
- tidKVMap.put(tid, sample);
- }
-
- calculateCPUDetails();
- calculatePagingActivity();
- }
-
- private void calculateCPUDetails() {
- if (oldkvTimestamp == kvTimestamp) {
- return;
- }
-
- for (String tid : tidKVMap.keySet()) {
- Map<String, Object> v = tidKVMap.get(tid);
- Map<String, Object> oldv = oldtidKVMap.get(tid);
- if (v != null && oldv != null) {
- if (!v.containsKey("utime") || !oldv.containsKey("utime")) {
- continue;
- }
- long diff = ((long) (v.getOrDefault("utime", 0L)) - (long) (oldv.getOrDefault("utime", 0L)))
- + ((long) (v.getOrDefault("stime", 0L)) - (long) (oldv.getOrDefault("stime", 0L)));
- double util = (1.0e3 * diff / scClkTck) / (kvTimestamp - oldkvTimestamp);
- cpuPagingActivityMap.setCPUUtilization(tid, util);
- }
- }
- }
-
- /**
- * Note: major faults include mmap()'ed accesses
- *
- */
- private void calculatePagingActivity() {
- if (oldkvTimestamp == kvTimestamp) {
- return;
- }
-
-
- for (String tid : tidKVMap.keySet()) {
- Map<String, Object> v = tidKVMap.get(tid);
- Map<String, Object> oldv = oldtidKVMap.get(tid);
- if (v != null && oldv != null) {
- if (!v.containsKey("majflt") || !oldv.containsKey("majflt")) {
- continue;
- }
- double majdiff = ((long) (v.getOrDefault("majflt", 0L)) - (long) (oldv.getOrDefault("majflt", 0L)));
- majdiff /= 1.0e-3 * (kvTimestamp - oldkvTimestamp);
- double mindiff = ((long) (v.getOrDefault("minflt", 0L)) - (long) (oldv.getOrDefault("minflt", 0L)));
- mindiff /= 1.0e-3 * (kvTimestamp - oldkvTimestamp);
-
- Double[] fltarr = {majdiff, mindiff, (double) ((long) v.getOrDefault("rss", 0L))};
- cpuPagingActivityMap.setPagingActivities(tid, fltarr);
- }
- }
- }
-
- public LinuxCPUPagingActivityGenerator getCPUPagingActivity() {
-
- return cpuPagingActivityMap;
- }
-}
-
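Note on the removed CPU-utilization calculation: the expression (1.0e3 * diff / scClkTck) / (kvTimestamp - oldkvTimestamp) converts the utime+stime delta from clock ticks into milliseconds of CPU time and divides by the elapsed wall-clock milliseconds, so 1.0 corresponds to one fully busy core. As a worked example (assuming a typical SC_CLK_TCK of 100): a thread that accumulated diff = 250 ticks over a 5000 ms window used 1.0e3 * 250 / 100 = 2500 ms of CPU time, i.e. util = 2500 / 5000 = 0.5. The paging figures are per-second rates over the same window: each fault delta is divided by 1.0e-3 * (kvTimestamp - oldkvTimestamp), the window length in seconds.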
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadDiskIO.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadDiskIO.java
deleted file mode 100644
index 5915b6fd..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadDiskIO.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.os;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileNotFoundException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux.LinuxDiskIOMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatsCollector;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.collectors.StatExceptionCode;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-public class ThreadDiskIO {
- private static String pid = OSGlobals.getPid();
- private static List<String> tids = null;
- private static final Logger LOGGER = LogManager.getLogger(ThreadDiskIO.class);
-
- private static Map<String, Map<String, Long>> tidKVMap = new HashMap<>();
- private static Map<String, Map<String, Long>> oldtidKVMap = new HashMap<>();
- private static long kvTimestamp = 0;
- private static long oldkvTimestamp = 0;
-
- public static class IOMetrics {
- public double avgReadThroughputBps;
- public double avgWriteThroughputBps;
- public double avgTotalThroughputBps;
-
- public double avgReadSyscallRate;
- public double avgWriteSyscallRate;
- public double avgTotalSyscallRate;
-
- public double avgPageCacheReadThroughputBps;
- public double avgPageCacheWriteThroughputBps;
- public double avgPageCacheTotalThroughputBps;
-
- @SuppressWarnings("checkstyle:parameternumber")
- IOMetrics(double avgReadThroughputBps,
- double avgReadSyscallRate,
- double avgWriteThroughputBps,
- double avgWriteSyscallRate,
- double avgTotalThroughputBps,
- double avgTotalSyscallRate,
- double avgPageCacheReadThroughputBps,
- double avgPageCacheWriteThroughputBps,
- double avgPageCacheTotalThroughputBps) {
- this.avgReadThroughputBps = avgReadThroughputBps;
- this.avgWriteThroughputBps = avgWriteThroughputBps;
- this.avgTotalThroughputBps = avgTotalThroughputBps;
- this.avgReadSyscallRate = avgReadSyscallRate;
- this.avgWriteSyscallRate = avgWriteSyscallRate;
- this.avgTotalSyscallRate = avgTotalSyscallRate;
- this.avgPageCacheReadThroughputBps = avgPageCacheReadThroughputBps;
- this.avgPageCacheWriteThroughputBps = avgPageCacheWriteThroughputBps;
- this.avgPageCacheTotalThroughputBps = avgPageCacheTotalThroughputBps;
- }
- public String toString() {
- return new StringBuilder().append("rBps:").append(avgReadThroughputBps)
- .append(" wBps:").append(avgWriteThroughputBps)
- .append(" totBps:").append(avgTotalThroughputBps)
- .append(" rSysc:").append(avgReadSyscallRate)
- .append(" wSysc:").append(avgWriteSyscallRate)
- .append(" totSysc:").append(avgTotalSyscallRate)
- .append(" rPcBps:").append(avgPageCacheReadThroughputBps)
- .append(" wPcBps:").append(avgPageCacheWriteThroughputBps)
- .append(" totPcBps:").append(avgPageCacheTotalThroughputBps).toString();
- }
- }
-
-
- private static void addSampleTid(String tid) {
- try (FileReader fileReader = new FileReader(new File("/proc/" + pid + "/task/" + tid + "/io"));
- BufferedReader bufferedReader = new BufferedReader(fileReader);) {
- String line = null;
- Map<String, Long> kvmap = new HashMap<>();
- while ((line = bufferedReader.readLine()) != null) {
- String[] toks = line.split("[: ]+");
- String key = toks[0];
- long val = Long.parseLong(toks[1]);
- kvmap.put(key, val);
- }
- tidKVMap.put(tid, kvmap);
- } catch (FileNotFoundException e) {
- LOGGER.debug("FileNotFound in parse with exception: {}", () -> e.toString());
- } catch (Exception e) {
- LOGGER.debug("Error In addSample Tid for: {} with error: {} with ExceptionCode: {}",
- () -> tid, () -> e.toString(), () -> StatExceptionCode.THREAD_IO_ERROR.toString());
- StatsCollector.instance().logException(StatExceptionCode.THREAD_IO_ERROR);
- }
- }
-
- public static synchronized void addSample() {
- tids = OSGlobals.getTids();
- oldtidKVMap.clear();
- oldtidKVMap.putAll(tidKVMap);
-
- tidKVMap.clear();
- oldkvTimestamp = kvTimestamp;
- kvTimestamp = System.currentTimeMillis();
- for (String tid : tids) {
- addSampleTid(tid);
- }
- }
-
- public static synchronized LinuxDiskIOMetricsGenerator getIOUtilization() {
-
- LinuxDiskIOMetricsGenerator linuxDiskIOMetricsHandler = new LinuxDiskIOMetricsGenerator();
- if (oldkvTimestamp == kvTimestamp) {
- return linuxDiskIOMetricsHandler;
- }
-
- for (String tid : tidKVMap.keySet()) {
- Map<String, Long> v = tidKVMap.get(tid);
- Map<String, Long> oldv = oldtidKVMap.get(tid);
- if (v != null && oldv != null) {
- double duration = 1.0e-3 * (kvTimestamp - oldkvTimestamp);
- double readBytes = v.get("read_bytes") - oldv.get("read_bytes");
- double writeBytes = v.get("write_bytes") - oldv.get("write_bytes");
- double readSyscalls = v.get("syscr") - oldv.get("syscr");
- double writeSyscalls = v.get("syscw") - oldv.get("syscw");
- double readPcBytes = v.get("rchar") - oldv.get("rchar") - readBytes;
- double writePcBytes = v.get("wchar") - oldv.get("wchar") - writeBytes;
- readBytes /= duration;
- readSyscalls /= duration;
- writeBytes /= duration;
- writeSyscalls /= duration;
- readPcBytes /= duration;
- writePcBytes /= duration;
-
- linuxDiskIOMetricsHandler.setDiskIOMetrics(tid, new IOMetrics(readBytes, readSyscalls, writeBytes, writeSyscalls,
- readBytes+writeBytes, readSyscalls+writeSyscalls,
- readPcBytes, writePcBytes, readPcBytes+writePcBytes));
- }
- }
- return linuxDiskIOMetricsHandler;
- }
-}
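Note on the removed I/O accounting: in the /proc io counters, rchar/wchar count bytes passed through read()/write()-style syscalls while read_bytes/write_bytes count bytes that actually reached the storage layer, so the page-cache throughput is the difference of the two deltas before everything is divided by the window length in seconds. Worked example over a 5-second window: Δrchar = 10 MiB and Δread_bytes = 2 MiB give a device read throughput of roughly 0.4 MiB/s and a page-cache read throughput of (10 − 2) / 5 = 1.6 MiB/s.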
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadSched.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadSched.java
deleted file mode 100644
index ea8d5f0b..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/os/ThreadSched.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.os;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.SchedMetricsGenerator;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics_generator.linux.LinuxSchedMetricsGenerator;
-
-public final class ThreadSched {
- private static final Logger LOGGER = LogManager.getLogger(ThreadSched.class);
- public static final ThreadSched INSTANCE = new ThreadSched();
- private String pid = null;
- private List<String> tids = null;
- private Map<String, Map<String, Object>> tidKVMap = new HashMap<>();
- private Map<String, Map<String, Object>> oldtidKVMap = new HashMap<>();
- private long kvTimestamp = 0;
- private long oldkvTimestamp = 0;
-
- public static class SchedMetrics {
- public final double avgRuntime;
- public final double avgWaittime;
- public final double contextSwitchRate; //both voluntary and involuntary
- SchedMetrics(double avgRuntime, double avgWaittime, double contextSwitchRate) {
- this.avgRuntime = avgRuntime;
- this.avgWaittime = avgWaittime;
- this.contextSwitchRate = contextSwitchRate;
- }
- @Override
- public String toString() {
- return new StringBuilder().append("avgruntime: ")
- .append(avgRuntime).append(" avgwaittime: ").append(avgWaittime)
- .append(" ctxrate: ").append(contextSwitchRate).toString();
- }
- }
-
- private LinuxSchedMetricsGenerator schedLatencyMap = new LinuxSchedMetricsGenerator();
-
- private static String[] schedKeys = {
- "runticks",
- "waitticks",
- "totctxsws"
- };
-
- private static SchemaFileParser.FieldTypes[] schedTypes = {
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG,
- SchemaFileParser.FieldTypes.ULONG
- };
-
- private ThreadSched() {
- try {
- pid = OSGlobals.getPid();
- tids = OSGlobals.getTids();
- } catch (Exception e) {
- LOGGER.error(
- (Supplier<?>) () -> new ParameterizedMessage(
- "Error In Initializing ThreadCPU: {}",
- e.toString()),
- e);
- }
- }
-
- public synchronized void addSample() {
- tids = OSGlobals.getTids();
-
- oldtidKVMap.clear();
- oldtidKVMap.putAll(tidKVMap);
-
- tidKVMap.clear();
- oldkvTimestamp = kvTimestamp;
- kvTimestamp = System.currentTimeMillis();
- for (String tid : tids) {
- Map<String, Object> sample =
- (new SchemaFileParser("/proc/" + pid + "/task/" + tid + "/schedstat",
- schedKeys, schedTypes)).parse();
- tidKVMap.put(tid, sample);
- }
-
- calculateSchedLatency();
- }
-
- private void calculateSchedLatency() {
- if (oldkvTimestamp == kvTimestamp) {
- return;
- }
-
- for (String tid : tidKVMap.keySet()) {
- Map<String, Object> v = tidKVMap.get(tid);
- Map<String, Object> oldv = oldtidKVMap.get(tid);
- if (v != null && oldv != null) {
- if (!v.containsKey("totctxsws") || !oldv.containsKey("totctxsws")) {
- continue;
- }
- long ctxdiff = (long) v.getOrDefault("totctxsws", 0L) - (long) oldv.getOrDefault("totctxsws", 0L);
- double avgRuntime = 1.0e-9 * ((long) v.getOrDefault("runticks", 0L) - (long) oldv.getOrDefault("runticks", 0L));
- double avgWaittime = 1.0e-9 * ((long) v.getOrDefault("waitticks", 0L) - (long) oldv.getOrDefault("waitticks", 0L));
- if (ctxdiff == 0) {
- avgRuntime = 0;
- avgWaittime = 0;
- } else {
- avgRuntime /= 1.0 * ctxdiff;
- avgWaittime /= 1.0 * ctxdiff;
- }
- double contextSwitchRate = ctxdiff;
- contextSwitchRate /= 1.0e-3 * (kvTimestamp - oldkvTimestamp);
-
- schedLatencyMap.setSchedMetric(tid, new SchedMetrics(avgRuntime, avgWaittime, contextSwitchRate));
- }
- }
- }
-
- public synchronized SchedMetricsGenerator getSchedLatency() {
-
- return schedLatencyMap;
- }
-}
-
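Note on the removed scheduler metrics: /proc/<pid>/task/<tid>/schedstat exposes three counters, time spent on-CPU (ns), time spent waiting on a runqueue (ns), and the number of timeslices run, which the code read as runticks, waitticks and totctxsws. Worked example: with Δrunticks = 2.0e8 ns, Δwaitticks = 5.0e7 ns and Δtotctxsws = 100 over a 5000 ms window, avgRuntime = 1.0e-9 * 2.0e8 / 100 = 2 ms per slice, avgWaittime = 0.5 ms per slice, and contextSwitchRate = 100 / 5 s = 20 switches per second.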
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/ClusterLevelMetricsReader.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/ClusterLevelMetricsReader.java
deleted file mode 100644
index 0564d42f..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/ClusterLevelMetricsReader.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.reader;
-
-import java.util.Map;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.NodeDetailColumns;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.util.JsonConverter;
-
-public class ClusterLevelMetricsReader {
-
- /**
- * Almost the same as NodeDetailsCollector.NodeDetailsStatus.
- * Consider keeping only one of them for easy maintenance. Don't do it now
- * as we may separate reader and writer code later and we don't want much
- * refactoring before release.
- *
- */
- public static class NodeDetails {
- private String id;
- private String hostAddress;
-
- NodeDetails(String stringifiedMetrics) {
- Map<String, Object> map = JsonConverter
- .createMapFrom(stringifiedMetrics);
- id = (String) map.get(NodeDetailColumns.ID.toString());
- hostAddress = (String) map.get(NodeDetailColumns.HOST_ADDRESS
- .toString());
- }
-
- @Override
- public String toString() {
- StringBuilder stringBuilder = new StringBuilder();
- stringBuilder.append("{");
- stringBuilder.append("id:" + id);
- stringBuilder.append(" hostAddress:" + hostAddress);
- stringBuilder.append("}");
- return stringBuilder.toString();
- }
-
- public String getId() {
- return id;
- }
-
- public String getHostAddress() {
- return hostAddress;
- }
- }
-
- private static int sPollTimeInterval = 60000;
- private static final Logger LOG = LogManager.getLogger(ClusterLevelMetricsReader.class);
- private static int sBuckets = 60;
-
- private static NodeDetails[] nodesDetails = new NodeDetails[0];
-
- public static NodeDetails[] getNodes() {
- return nodesDetails.clone();
- }
-
- public static void collectNodeMetrics(long startTime) throws Exception {
- String sNodesDetails = PerformanceAnalyzerMetrics.getMetric(startTime, PerformanceAnalyzerMetrics.sNodesPath);
-
- if(sNodesDetails != null) {
- String lines[] = sNodesDetails.split("\\r?\\n");
-
-
- if(lines.length < 2) {
- LOG.error("Skip parsing. Number of lines: {}.", lines.length);
- return;
- }
-
- NodeDetails[] tmpNodesDetails = new NodeDetails[lines.length-1];
-
- // line 0 is last modified time of the file
-
- tmpNodesDetails[0] = new NodeDetails(lines[1]);
- int tmpNodeDetailsIndex = 1;
-
- for(int i = 2; i < lines.length; i++) {
- NodeDetails tmp = new NodeDetails(lines[i]);
-
- if(!tmp.id.equals(tmpNodesDetails[0].id)) {
- tmpNodesDetails[tmpNodeDetailsIndex++] = tmp;
- }
- }
-
- nodesDetails = tmpNodesDetails;
- }
- }
-}
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/FileHandler.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/FileHandler.java
deleted file mode 100644
index 5fa0c97c..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/FileHandler.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.reader;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.PerformanceAnalyzerMetrics;
-import com.google.common.annotations.VisibleForTesting;
-
-public abstract class FileHandler {
- private static final String[] EMPTY_STRING_ARRAY = new String[0];
-
- private String rootLocation;
-
- // find all relevant files for a metric
- public abstract List<File> findFiles4Metric(long timeBucket);
-
- FileHandler() {
- this.rootLocation = PerformanceAnalyzerMetrics.sDevShmLocation;
- }
-
- public String[] processExtraDimensions(File file) throws IOException {
- if (filePathRegex().isEmpty()) {
- return EMPTY_STRING_ARRAY;
- }
-
- // Note the question mark in the 1st group is reluctant
- // quantifier.
- Pattern pattern = Pattern.compile(filePathRegex());
- // our regex uses '/' as file separator
- Matcher matcher = pattern.matcher(file.getCanonicalPath());
- if (matcher.find()) {
- int groupCount = matcher.groupCount();
- String[] extraDimensions = new String[groupCount];
- // group 0 is the entire match
- for (int i = 1; i <= groupCount; i++) {
- extraDimensions[i-1] = matcher.group(i);
- }
- return extraDimensions;
- }
- throw new IOException(String.format(
- "Cannot find a matching path %s", file.getCanonicalPath()));
-
- }
-
- // Override this method if we need to extract extra dimensions from the
- // file path.
- protected String filePathRegex() {
- return "";
- }
-
- public String getRootLocation() {
- return rootLocation;
- }
-
- @VisibleForTesting
- void setRootLocation(String location) {
- rootLocation = location;
- }
-}
-
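Note: the contract of the removed FileHandler is easiest to see with a concrete subclass. The sketch below is illustrative only; the class name, directory layout and regex are hypothetical, not the plugin's real ones. A handler lists the files belonging to a time bucket, and filePathRegex() contributes one extra dimension per capture group via processExtraDimensions().

import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical subclass of the removed FileHandler, for illustration only.
public class ThreadMetricsFileHandler extends FileHandler {
    @Override
    public List<File> findFiles4Metric(long timeBucket) {
        // Assumed layout: <root>/<timeBucket>/threads/<tid>/os_metrics
        File threadsDir = new File(getRootLocation(), timeBucket + "/threads");
        File[] tidDirs = threadsDir.listFiles();
        if (tidDirs == null) {
            return Collections.emptyList();
        }
        List<File> files = new ArrayList<>();
        for (File tidDir : tidDirs) {
            files.add(new File(tidDir, "os_metrics"));
        }
        return files;
    }

    @Override
    protected String filePathRegex() {
        // One capture group per extra dimension; here group(1) is the thread id,
        // so processExtraDimensions() would return {"4242"} for .../threads/4242/os_metrics.
        return "/threads/(\\d+)/os_metrics";
    }
}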
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/HttpRequestMetricsSnapshot.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/HttpRequestMetricsSnapshot.java
deleted file mode 100644
index acf0d22f..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/HttpRequestMetricsSnapshot.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.reader;
-
-import java.sql.Connection;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.BatchBindStep;
-import org.jooq.DSLContext;
-import org.jooq.Field;
-import org.jooq.Record;
-import org.jooq.Result;
-import org.jooq.SQLDialect;
-import org.jooq.SelectField;
-import org.jooq.SelectHavingStep;
-import org.jooq.impl.DSL;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.DBUtils;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.CommonDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HttpDimension;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.HttpMetric;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metricsdb.MetricsDB;
-
-/**
- * Snapshot of start/end events generated by customer-initiated HTTP operations such as bulk and search.
- */
-@SuppressWarnings("serial")
-public class HttpRequestMetricsSnapshot implements Removable {
- private static final Logger LOG = LogManager.getLogger(HttpRequestMetricsSnapshot.class);
- private static final Long EXPIRE_AFTER = 600000L;
- private final DSLContext create;
- private final Long windowStartTime;
- private final String tableName;
- private List<String> columns;
-
- public enum Fields {
- RID("rid"),
- OPERATION(CommonDimension.OPERATION.toString()),
- INDICES(HttpDimension.INDICES.toString()),
- HTTP_RESP_CODE(HttpDimension.HTTP_RESP_CODE.toString()),
- EXCEPTION(CommonDimension.EXCEPTION.toString()),
- HTTP_REQUEST_DOCS(HttpMetric.HTTP_REQUEST_DOCS.toString()),
- ST("st"),
- ET("et"),
- LAT("lat"),
- HTTP_TOTAL_REQUESTS(HttpMetric.HTTP_TOTAL_REQUESTS.toString());
-
- private final String fieldValue;
-
- Fields(String fieldValue) {
- this.fieldValue = fieldValue;
- }
-
- @Override
- public String toString() {
- return fieldValue;
- }
- }
-
- public HttpRequestMetricsSnapshot(Connection conn, Long windowStartTime) throws Exception {
- this.create = DSL.using(conn, SQLDialect.SQLITE);
- this.windowStartTime = windowStartTime;
- this.tableName = "http_rq_" + windowStartTime;
-
- this.columns = new ArrayList<String>() { {
- this.add(Fields.RID.toString());
- this.add(Fields.OPERATION.toString());
- this.add(Fields.INDICES.toString());
- this.add(Fields.HTTP_RESP_CODE.toString());
- this.add(Fields.EXCEPTION.toString());
- this.add(Fields.HTTP_REQUEST_DOCS.toString());
- this.add(Fields.ST.toString());
- this.add(Fields.ET.toString());
- } };
-
- List<Field<?>> fields = new ArrayList<Field<?>>() { {
- this.add(DSL.field(DSL.name(Fields.RID.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.OPERATION.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.INDICES.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.HTTP_RESP_CODE.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.EXCEPTION.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.HTTP_REQUEST_DOCS.toString()), Long.class));
- this.add(DSL.field(DSL.name(Fields.ST.toString()), Long.class));
- this.add(DSL.field(DSL.name(Fields.ET.toString()), Long.class));
- } };
-
- create.createTable(this.tableName)
- .columns(fields)
- .execute();
- }
-
- public void putStartMetric(Long startTime, Long itemCount, Map<String, String> dimensions) {
- Map<Field<String>, String> dimensionMap = new HashMap<>();
- for (Map.Entry<String, String> dimension: dimensions.entrySet()) {
- dimensionMap.put(DSL.field(DSL.name(dimension.getKey()), String.class),
- dimension.getValue());
- }
- create.insertInto(DSL.table(this.tableName))
- .set(DSL.field(DSL.name(Fields.ST.toString()), Long.class), startTime)
- .set(DSL.field(DSL.name(Fields.HTTP_REQUEST_DOCS.toString()), Long.class), itemCount)
- .set(dimensionMap)
- .execute();
- }
-
- public BatchBindStep startBatchPut() {
- List<Object> dummyValues = new ArrayList<>();
- for (int i = 0; i < columns.size(); i++) {
- dummyValues.add(null);
- }
- return create.batch(create.insertInto(DSL.table(this.tableName)).values(dummyValues));
- }
-
- public void putEndMetric(Long endTime, Map<String, String> dimensions) {
- Map<Field<String>, String> dimensionMap = new HashMap<>();
- for (Map.Entry<String, String> dimension: dimensions.entrySet()) {
- dimensionMap.put(DSL.field(
- DSL.name(dimension.getKey()), String.class),
- dimension.getValue());
- }
- create.insertInto(DSL.table(this.tableName))
- .set(DSL.field(DSL.name(Fields.ET.toString()), Long.class), endTime)
- .set(dimensionMap)
- .execute();
- }
-
- public Result<Record> fetchAll() {
- return create.select().from(DSL.table(this.tableName)).fetch();
- }
-
- /**
- * This function returns a single row for each request.
- * We have a start and end event for each request and each event has different attributes.
- * This function aggregates all the data into a single row.
- *
- * Actual Table -
- * |rid |operation|indices |status|exception|itemCount| st| et|
- * +-------+---------+--------+------+---------+---------+-------------+-------------+
- * |1417935|search | |{null}|{null} | 0|1535065254939| {null}|
- * |1418424|search |{null} |200 | | {null}| {null}|1535065341025|
- * |1418424|search |sonested|{null}|{null} | 0|1535065340730| {null}|
- * |1418435|search |{null} |200 | | {null}| {null}|1535065343355|
- *
- * Returned Table
- * |rid |operation|indices |status|exception|itemCount| st| et|
- * +-------+---------+--------+------+---------+---------+-------------+-------------+
- * |1418424|search |sonested|200 | | 0|1535065340730|1535065341025|
- * |1418435|search | |200 | | 0|1535065254939|1535065343355|
- *
- * @return a single row for each http request
- */
- public SelectHavingStep<Record> groupByRidSelect() {
- ArrayList<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(Fields.RID.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.OPERATION.toString()), String.class));
- } };
- fields.add(DSL.max(DSL.field(Fields.ST.toString(), Long.class)).as(DSL.name(Fields.ST.toString())));
- fields.add(DSL.max(DSL.field(Fields.ET.toString(), Long.class)).as(DSL.name(Fields.ET.toString())));
- fields.add(DSL.max(DSL.field(Fields.INDICES.toString())).as(DSL.name(Fields.INDICES.toString())));
- fields.add(DSL.max(DSL.field(Fields.HTTP_RESP_CODE.toString())).as(DSL.name(Fields.HTTP_RESP_CODE.toString())));
- fields.add(DSL.max(DSL.field(Fields.EXCEPTION.toString())).as(DSL.name(Fields.EXCEPTION.toString())));
- fields.add(DSL.max(DSL.field(Fields.HTTP_REQUEST_DOCS.toString())).as(DSL.name(Fields.HTTP_REQUEST_DOCS.toString())));
-
- return create.select(fields).from(DSL.table(this.tableName))
- .groupBy(DSL.field(Fields.RID.toString()));
- }
-
- /**
- * This function returns row with latency for each request.
- * We have a start and end event for each request and each event has different attributes.
- * This function aggregates all the data into a single row.
- *
- * Actual Table -
- * |rid |operation|indices |status|exception|itemCount| st| et|
- * +-------+---------+--------+------+---------+---------+-------------+-------------+
- * |1417935|search | |{null}|{null} | 0|1535065254939| {null}|
- * |1418424|search |{null} |200 | | {null}| {null}|1535065341025|
- * |1418424|search |sonested|{null}|{null} | 0|1535065340730| {null}|
- * |1418435|search |{null} |200 | | {null}| {null}|1535065343355|
- *
- * Returned Table
- * |rid |operation|indices |status|exception|itemCount| st| et| lat|
- * +-------+---------+--------+------+---------+---------+-------------+-------------+-----+
- * |1418424|search |sonested|200 | | 0|1535065340730|1535065341025| 295|
- * |1418435|search | |200 | | 0|1535065254939|1535065343355|88416|
- *
- * @return rows with latency for each request
- */
- public SelectHavingStep<Record> fetchLatencyTable() {
- ArrayList<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(Fields.RID.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.OPERATION.toString()), String.class));
- this.add(DSL.field(Fields.ST.toString(), Long.class));
- this.add(DSL.field(Fields.ET.toString(), Long.class));
- this.add(DSL.field(Fields.HTTP_RESP_CODE.toString()));
- this.add(DSL.field(Fields.INDICES.toString()));
- this.add(DSL.field(Fields.EXCEPTION.toString()));
- this.add(DSL.field(Fields.HTTP_REQUEST_DOCS.toString()));
- } };
- fields.add(DSL.field(Fields.ET.toString()).minus(DSL.field(Fields.ST.toString())).as(DSL.name(Fields.LAT.toString())));
- return create.select(fields).from(groupByRidSelect())
- .where(DSL.field(Fields.ET.toString()).isNotNull().and(
- DSL.field(Fields.ST.toString()).isNotNull()));
- }
-
- /**
- * This function aggregates rows by operation.
- * This is a performance optimization to avoid writing one entry per request back into metricsDB.
- * This function returns one row per operation.
- *
- * Latency Table -
- * |rid |operation|indices |status|exception|itemCount| st| et|lat|
- * +-------+---------+--------+------+---------+---------+-------------+-------------+-----+
- * |1418424|search |sonested|200 | | 0|1535065340730|1535065341025|295|
- * |1418435|search |sonested|200 | | 0|1535065254939|1535065343355|305|
- *
- * Returned Table -
- * |operation|indices |status|exception|sum_lat|avg_lat|min_lat|max_lat|
- * +---------+--------+------+---------+---------+-------------+-------+
- * |search |sonested|200 | | 600| 300| 295| 305|
- *
- * @return latency rows by operation
- */
- public Result<Record> fetchLatencyByOp() {
- ArrayList<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(Fields.OPERATION.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.HTTP_RESP_CODE.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.INDICES.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.EXCEPTION.toString()), String.class));
- this.add(DSL.sum(DSL.field(DSL.name(Fields.HTTP_REQUEST_DOCS.toString()), Long.class))
- .as(DBUtils.getAggFieldName(Fields.HTTP_REQUEST_DOCS.toString(), MetricsDB.SUM)));
- this.add(DSL.avg(DSL.field(DSL.name(Fields.HTTP_REQUEST_DOCS.toString()), Long.class))
- .as(DBUtils.getAggFieldName(Fields.HTTP_REQUEST_DOCS.toString(), MetricsDB.AVG)));
- this.add(DSL.min(DSL.field(DSL.name(Fields.HTTP_REQUEST_DOCS.toString()), Long.class))
- .as(DBUtils.getAggFieldName(Fields.HTTP_REQUEST_DOCS.toString(), MetricsDB.MIN)));
- this.add(DSL.max(DSL.field(DSL.name(Fields.HTTP_REQUEST_DOCS.toString()), Long.class))
- .as(DBUtils.getAggFieldName(Fields.HTTP_REQUEST_DOCS.toString(), MetricsDB.MAX)));
- this.add(DSL.sum(DSL.field(DSL.name(Fields.LAT.toString()), Double.class))
- .as(DBUtils.getAggFieldName(Fields.LAT.toString(), MetricsDB.SUM)));
- this.add(DSL.avg(DSL.field(DSL.name(Fields.LAT.toString()), Double.class))
- .as(DBUtils.getAggFieldName(Fields.LAT.toString(), MetricsDB.AVG)));
- this.add(DSL.min(DSL.field(DSL.name(Fields.LAT.toString()), Double.class))
- .as(DBUtils.getAggFieldName(Fields.LAT.toString(), MetricsDB.MIN)));
- this.add(DSL.max(DSL.field(DSL.name(Fields.LAT.toString()), Double.class))
- .as(DBUtils.getAggFieldName(Fields.LAT.toString(), MetricsDB.MAX)));
- this.add(DSL.count().as(Fields.HTTP_TOTAL_REQUESTS.toString()));
- } };
- ArrayList<Field<?>> groupByFields = new ArrayList<Field<?>>() { {
- this.add(DSL.field(DSL.name(Fields.OPERATION.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.HTTP_RESP_CODE.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.INDICES.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.EXCEPTION.toString()), String.class));
- } };
-
- return create.select(fields).from(fetchLatencyTable())
- .groupBy(groupByFields).fetch();
- }
-
- /**
- * This function returns requests with a missing end event.
- * A request may be long-running and its end event might not have occurred in this snapshot.
- *
- * Actual Table -
- * |rid |operation|indices |status|exception|itemCount| st| et|
- * +-------+---------+--------+------+---------+---------+-------------+-------------+
- * |1417935|search | |{null}|{null} | 0|1535065254939| {null}|
- * |1418424|search |sonested|{null}|{null} | 0|1535065340730| {null}|
- * |1418435|search |{null} |200 | | {null}| {null}|1535065343355|
- *
- * Returned Table
- * |rid |operation|indices |status|exception|itemCount| st| et|
- * +-------+---------+--------+------+---------+---------+-------------+-------------+
- * |1418424|search |sonested|200 | | 0|1535065340730| |
- *
- * @return rows missing an end event
- */
- public SelectHavingStep<Record> fetchInflightRequests() {
- ArrayList<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(Fields.RID.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.OPERATION.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.INDICES.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.HTTP_RESP_CODE.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.EXCEPTION.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.HTTP_REQUEST_DOCS.toString()), Long.class));
- this.add(DSL.field(Fields.ST.toString(), Long.class));
- this.add(DSL.field(Fields.ET.toString(), Long.class));
- } };
-
- return create.select(fields).from(groupByRidSelect())
- .where(DSL.field(Fields.ST.toString()).isNotNull()
- .and(DSL.field(Fields.ET.toString()).isNull())
- .and(DSL.field(Fields.ST.toString()).gt(this.windowStartTime - EXPIRE_AFTER)));
- }
-
- public String getTableName() {
- return this.tableName;
- }
-
- @Override
- public void remove() {
- LOG.info("Dropping table - {}", this.tableName);
- create.dropTable(DSL.table(this.tableName)).execute();
- }
-
- public void rolloverInflightRequests(HttpRequestMetricsSnapshot prevSnap) {
- //Fetch all entries that have not ended and write to current table.
- create.insertInto(DSL.table(this.tableName)).select(
- create.select().from(prevSnap.fetchInflightRequests())).execute();
- }
-}
-
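Note: as a quick check of the latency projection in the removed fetchLatencyTable(), lat is simply et − st on the grouped row. For the first example row in the javadoc above, 1535065341025 − 1535065340730 = 295 ms, and for the second, 1535065343355 − 1535065254939 = 88416 ms, matching the values shown in the table.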
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MasterEventMetricsSnapshot.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MasterEventMetricsSnapshot.java
deleted file mode 100644
index 644fc637..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MasterEventMetricsSnapshot.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.reader;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.DBUtils;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.MetricsConfiguration;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metricsdb.MetricsDB;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.BatchBindStep;
-import org.jooq.DSLContext;
-import org.jooq.Field;
-import org.jooq.Record;
-import org.jooq.Result;
-import org.jooq.SQLDialect;
-import org.jooq.SelectField;
-import org.jooq.SelectHavingStep;
-import org.jooq.impl.DSL;
-
-
-import java.sql.Connection;
-import java.util.ArrayList;
-import java.util.List;
-
-
-public class MasterEventMetricsSnapshot implements Removable {
- private static final Logger LOG = LogManager.getLogger(MasterEventMetricsSnapshot.class);
-
- private final DSLContext create;
- private final Long windowStartTime;
- private final String tableName;
- private static final Long EXPIRE_AFTER = 1200000L;
- private List<Field<?>> columns;
-
-
- public enum Fields {
-
- TID("tid"),
- IS_CURRENT("isCurrent"),
- OLD_START("oldStart"),
- ST("st"),
- ET("et"),
- LAT ("lat");
-
- private final String fieldValue;
-
- Fields(String fieldValue) {
- this.fieldValue = fieldValue;
- }
-
- @Override
- public String toString() {
- return fieldValue;
- }
- };
-
- public MasterEventMetricsSnapshot(Connection conn, Long windowStartTime) {
- this.create = DSL.using(conn, SQLDialect.SQLITE);
- this.windowStartTime = windowStartTime;
- this.tableName = "master_event_" + windowStartTime;
-
- this.columns = new ArrayList<Field<?>>() { {
- this.add(DSL.field(DSL.name(Fields.TID.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_PRIORITY.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_TYPE.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_METADATA.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.ST.toString()), Long.class));
- this.add(DSL.field(DSL.name(Fields.ET.toString()), Long.class));
- } };
-
- create.createTable(this.tableName)
- .columns(columns)
- .execute();
- }
-
-
- @Override
- public void remove() throws Exception {
-
- create.dropTable(DSL.table(this.tableName)).execute();
- }
-
- public void rolloverInflightRequests(MasterEventMetricsSnapshot prevSnap) {
- //Fetch all entries that have not ended and write to current table.
- create.insertInto(DSL.table(this.tableName)).select(prevSnap.fetchInflightRequests()).execute();
-
- LOG.debug("Inflight shard requests");
- LOG.debug(() -> fetchAll());
- }
-
- private SelectHavingStep<Record> fetchInflightRequests() {
-
- ArrayList<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(Fields.TID.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_PRIORITY.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_TYPE.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_METADATA.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()), String.class));
- this.add(DSL.field(DSL.name(Fields.ST.toString()), Long.class));
- this.add(DSL.field(DSL.name(Fields.ET.toString()), Long.class));
- } };
-
- return create.select(fields).from(groupByInsertOrder())
- .where(DSL.field(Fields.ST.toString()).isNotNull()
- .and(DSL.field(Fields.ET.toString()).isNull())
- .and(DSL.field(Fields.ST.toString()).gt(this.windowStartTime - EXPIRE_AFTER)));
- }
-
-
- /**
- * Return all master task events in the current window.
- *
- * Actual Table
- * |tid |insertOrder|taskType |priority|queueTime|metadata| st| et|
- * +-----+-----------+------------+--------+---------+--------+-------------+-------------+
- * |111 |1 |create-index|urgent |3 |{string}|1535065340625| {null}|
- * |111 |2 |create-index|urgent |12 |{string}|1535065340825| {null}|
- * |111 |1 | {null}| {null}| {null}| {null}| {null}|1535065340725|
- *
- * @return aggregated master task
- */
- public Result<Record> fetchAll() {
-
- return create.select().from(DSL.table(this.tableName)).fetch();
- }
-
- public BatchBindStep startBatchPut() {
-
- List<Object> dummyValues = new ArrayList<>();
- for (int i = 0; i < columns.size(); i++) {
- dummyValues.add(null);
- }
- return create.batch(create.insertInto(DSL.table(this.tableName)).values(dummyValues));
- }
-
-
- /**
- * Return one row per master task event. Group by the InsertOrder.
- * It has 12 columns
- * |InsertOrder|Priority|Type|Metadata|SUM_QueueTime|AVG_QueueTime|MIN_QueueTime|MAX_QueueTime|
- * SUM_RUNTIME|AVG_RUNTIME|MIN_RUNTIME|MAX_RUNTIME|
- *
- * @return aggregated master task
- */
- public Result<Record> fetchQueueAndRunTime() {
-
- List<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_PRIORITY.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_TYPE.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_METADATA.toString()), String.class));
-
- this.add(DSL.sum(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString(), MetricsDB.SUM)));
- this.add(DSL.avg(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString(), MetricsDB.AVG)));
- this.add(DSL.min(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString(), MetricsDB.MIN)));
- this.add(DSL.max(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString(), MetricsDB.MAX)));
-
- this.add(DSL.sum(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString(), MetricsDB.SUM)));
- this.add(DSL.avg(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString(), MetricsDB.AVG)));
- this.add(DSL.min(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString(), MetricsDB.MIN)));
- this.add(DSL.max(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString()), Double.class))
- .as(DBUtils.getAggFieldName(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString(), MetricsDB.MAX)));
- } };
-
- ArrayList<Field<?>> groupByFields = new ArrayList<Field<?>>() { {
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
- } };
-
- return create.select(fields).from(fetchRunTimeHelper())
- .groupBy(groupByFields)
- .fetch();
- }
-
- private SelectHavingStep<Record> fetchRunTimeHelper() {
-
- List<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_PRIORITY.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_TYPE.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_METADATA.toString()), String.class));
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()), String.class));
- this.add(DSL.field(Fields.ET.toString()).minus(DSL.field(Fields.ST.toString())).
- as(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_RUN_TIME.toString())));
- } };
-
- return create.select(fields).from(groupByInsertOrderAndAutoFillEndTime())
- .where(DSL.field(Fields.ET.toString()).isNotNull().and(
- DSL.field(Fields.ST.toString()).isNotNull()));
- }
-
-
- /**
- * Return one row per master task event. Group by the InsertOrder.
- * For a master task without a finish event, we will use the current window end time
- *
- * CurrentWindowEndTime: 1535065341025
- * Actual Table
- * |tid |insertOrder|taskType |priority|queueTime|metadata| st| et|
- * +-----+-----------+------------+--------+---------+--------+-------------+-------------+
- * |111 |1 |create-index|urgent |3 |{string}|1535065340625| {null}|
- * |111 |2 |create-index|urgent |12 |{string}|1535065340825| {null}|
- * |111 |1 | {null}| {null}| {null}| {null}| {null}|1535065340725|
- *
- * Returned:
- *
- * |tid |insertOrder|taskType |priority|queueTime|metadata| st| et|
- * +-----+-----------+------------+--------+---------+--------+-------------+-------------+
- * |111 |1 |create-index|urgent |3 |{string}|1535065340625|1535065340725|
- * |111 |2 |create-index|urgent |12 |{string}|1535065340825|1535065341025|
- *
- * @return aggregated master task
- */
- private SelectHavingStep<Record> groupByInsertOrderAndAutoFillEndTime() {
-
- Long endTime = windowStartTime + MetricsConfiguration.SAMPLING_INTERVAL;
- List<SelectField<?>> fields = getGroupByInsertOrderSelectFields();
- fields.add(DSL.least(DSL.coalesce(DSL.max(DSL.field(Fields.ET.toString(), Long.class)), endTime), endTime)
- .as(DSL.name(Fields.ET.toString())));
-
- ArrayList<Field<?>> groupByInsertOrder = new ArrayList<Field<?>>() { {
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
- } };
-
- return create.select(fields)
- .from(DSL.table(this.tableName))
- .groupBy(groupByInsertOrder);
- }
-
- /**
- * Return one row per master task event, grouped by InsertOrder; et may remain null.
- *
- * Actual Table
- * |tid |insertOrder|taskType |priority|queueTime|metadata| st| et|
- * +-----+-----------+------------+--------+---------+--------+-------------+-------------+
- * |111 |1 |create-index|urgent |3 |{string}|1535065340625| {null}|
- * |111 |2 |create-index|urgent |12 |{string}|1535065340825| {null}|
- * |111 |1 | {null}| {null}| {null}| {null}| {null}|1535065340725|
- *
- * Returned:
- *
- * |tid |insertOrder|taskType |priority|queueTime|metadata| st| et|
- * +-----+-----------+------------+--------+---------+--------+-------------+-------------+
- * |111 |1 |create-index|urgent |3 |{string}|1535065340625|1535065340725|
- * |111 |2 |create-index|urgent |12 |{string}|1535065340825| {null}|
- *
- * @return aggregated latency rows for each shard request
- */
- private SelectHavingStep<Record> groupByInsertOrder() {
-
- ArrayList<SelectField<?>> fields = getGroupByInsertOrderSelectFields();
-
- fields.add(DSL.max(DSL.field(Fields.ET.toString(), Long.class)).as(DSL.name(Fields.ET.toString())));
- fields.add(DSL.field(DSL.name(Fields.TID.toString()), String.class));
-
- ArrayList<Field<?>> groupByInsertOrder = new ArrayList<Field<?>>() { {
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
- } };
-
- return create.select(fields)
- .from(DSL.table(this.tableName))
- .groupBy(groupByInsertOrder);
- }
-
- private ArrayList<SelectField<?>> getGroupByInsertOrderSelectFields() {
-
- ArrayList<SelectField<?>> fields = new ArrayList<SelectField<?>>() { {
- this.add(DSL.field(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_INSERT_ORDER.toString()), String.class));
-
- this.add(DSL.max(DSL.field(AllMetrics.MasterMetricDimensions.MASTER_TASK_TYPE.toString()))
- .as(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_TYPE.toString())));
-
- this.add(DSL.max(DSL.field(AllMetrics.MasterMetricDimensions.MASTER_TASK_METADATA.toString()))
- .as(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_METADATA.toString())));
-
- this.add(DSL.max(DSL.field(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString()))
- .as(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_QUEUE_TIME.toString())));
-
- this.add(DSL.max(DSL.field(AllMetrics.MasterMetricDimensions.MASTER_TASK_PRIORITY.toString()))
- .as(DSL.name(AllMetrics.MasterMetricDimensions.MASTER_TASK_PRIORITY.toString())));
-
- this.add(DSL.max(DSL.field(Fields.ST.toString(), Long.class)).as(DSL.name(Fields.ST.toString())));
-
- } };
-
- return fields;
- }
-}
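Note on the removed end-time auto-fill: in groupByInsertOrderAndAutoFillEndTime(), least(coalesce(max(et), endTime), endTime) is what substitutes the window end for tasks whose finish event has not arrived, since coalesce supplies endTime when no et was recorded for an insert order and least caps any recorded et at the window boundary. Using the javadoc's own numbers, insert order 2 has no finish event and is assigned the current window end 1535065341025, while insert order 1 keeps its recorded 1535065340725.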
diff --git a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MemoryDBSnapshot.java b/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MemoryDBSnapshot.java
deleted file mode 100644
index fce168db..00000000
--- a/src/main/java/com/amazon/opendistro/elasticsearch/performanceanalyzer/reader/MemoryDBSnapshot.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-package com.amazon.opendistro.elasticsearch.performanceanalyzer.reader;
-
-import java.sql.Connection;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.jooq.BatchBindStep;
-import org.jooq.Condition;
-import org.jooq.DSLContext;
-import org.jooq.Field;
-import org.jooq.Record;
-import org.jooq.Record1;
-import org.jooq.Result;
-import org.jooq.SQLDialect;
-import org.jooq.SelectField;
-import org.jooq.SelectHavingStep;
-import org.jooq.SelectJoinStep;
-import org.jooq.impl.DSL;
-
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.DBUtils;
-import com.amazon.opendistro.elasticsearch.performanceanalyzer.metrics.AllMetrics.MetricName;
-
-public class MemoryDBSnapshot implements Removable {
- private static final Logger LOG = LogManager
- .getLogger(OSMetricsSnapshot.class);
-
- private static final String WEIGHT = "weight";
-
- private static final Field<Double> WEIGHT_FIELD =
- DSL.field(WEIGHT, Double.class);
-
- protected final DSLContext create;
- protected final String tableName;
-
- // the last update time of the /dev/shm/performanceanalyzer file that is the data
- // source of our db table.
- protected long lastUpdatedTime;
-
- private final List<String> dimensionNames;
-
- private final List<Field<String>> dimensionsFields;
-
- private final List